From eea13ed6a6d6ea48de84211d054edb04d1754878 Mon Sep 17 00:00:00 2001
From: Stephen Greene
Date: Fri, 11 Dec 2020 10:53:59 -0500
Subject: [PATCH 1/3] go.mod: Bump for kube 1.20

---
 go.mod |  12 ++--
 go.sum | 173 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 169 insertions(+), 16 deletions(-)

diff --git a/go.mod b/go.mod
index 9b8d1e0be..06fa790d9 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/openshift/router

-go 1.13
+go 1.15

require (
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e
@@ -11,7 +11,7 @@ require (
github.com/getsentry/raven-go v0.2.0 // indirect
github.com/go-logr/logr v0.2.1 // indirect
github.com/gocarina/gocsv v0.0.0-20190927101021-3ecffd272576
-github.com/google/go-cmp v0.4.0
+github.com/google/go-cmp v0.5.2
github.com/openshift/api v0.0.0-20200827090112-c05698d102cf
github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5
github.com/openshift/library-go v0.0.0-20200921120329-c803a7b7bb2c
@@ -20,9 +20,9 @@ require (
github.com/prometheus/common v0.10.0
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
-k8s.io/api v0.19.2
-k8s.io/apimachinery v0.19.2
-k8s.io/apiserver v0.19.2
-k8s.io/client-go v0.19.2
+k8s.io/api v0.20.0
+k8s.io/apimachinery v0.20.0
+k8s.io/apiserver v0.20.0
+k8s.io/client-go v0.20.0
k8s.io/klog v1.0.0
)
diff --git a/go.sum b/go.sum
index eaf3c68eb..7ac40f0a8 100644
--- a/go.sum
+++ b/go.sum
@@ -5,29 +5,51 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -56,6 +78,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2 h1:MmeatFT1pTPSVb4nkPmBFN/LRZ97vPjsFKsZrU3KKTs= github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= @@ -69,6 +93,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 h1:dzj1/xcivGjNPwwifh/dWTczkwcuqsXXFHY1X/TZMtw= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -76,11 +101,14 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -89,6 +117,7 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution 
v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -102,6 +131,7 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -115,6 +145,7 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -130,7 +161,9 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -210,9 +243,13 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= @@ -220,6 +257,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -228,6 +266,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= @@ -235,6 +275,7 @@ github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhS github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -243,6 +284,9 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 
h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -252,10 +296,14 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= @@ -269,11 +317,16 @@ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -288,6 +341,7 @@ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -346,6 +400,7 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= @@ -412,6 +467,8 @@ github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURm github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= @@ -426,6 +483,7 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -456,8 +514,11 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -465,24 +526,32 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -501,13 +570,20 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -516,12 +592,16 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -542,20 +622,29 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -583,27 +672,39 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -611,6 +712,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZe golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -635,14 +738,28 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= @@ -650,7 +767,12 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= @@ -667,9 +789,20 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -681,6 +814,8 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -691,6 +826,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -705,6 +842,7 @@ 
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -721,6 +859,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -728,37 +868,38 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.18.0-beta.2/go.mod h1:2oeNnWEqcSmaM/ibSh3t7xcIqbkGXhzZdn4ezV9T4m0= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= -k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI= +k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= -k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8= +k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= 
k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= -k8s.io/apiserver v0.19.2 h1:xq2dXAzsAoHv7S4Xc/p7PKhiowdHV/PgdePWo3MxIYM= -k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/apiserver v0.20.0 h1:0MwO4xCoqZwhoLbFyyBSJdu55CScp4V4sAgX6z4oPBY= +k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= k8s.io/client-go v0.18.0-beta.2/go.mod h1:UvuVxHjKWIcgy0iMvF+bwNDW7l0mskTNOaOW1Qv5BMA= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= -k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0= +k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= -k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= -k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ= +k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -773,6 +914,8 @@ k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= @@ -780,20 +923,28 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDN k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/utils 
v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14 h1:TihvEz9MPj2u0KWds6E2OBUXfwaL4qRJ33c7HGiJpqk= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= @@ -804,6 +955,8 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJ sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= From db8f615c7cb4c35da10460a3ee2bc1ad62b1da6e Mon Sep 17 00:00:00 2001 From: Stephen Greene Date: Fri, 11 Dec 2020 10:54:08 -0500 Subject: [PATCH 2/3] vendor: Bump for kube 1.20 --- .../github.com/NYTimes/gziphandler/.gitignore | 1 + .../NYTimes/gziphandler/.travis.yml | 6 + .../NYTimes/gziphandler/CODE_OF_CONDUCT.md | 75 + .../NYTimes/gziphandler/CONTRIBUTING.md | 30 + .../github.com/NYTimes/gziphandler/LICENSE.md | 13 + .../github.com/NYTimes/gziphandler/README.md | 52 + 
vendor/github.com/NYTimes/gziphandler/gzip.go | 332 + .../NYTimes/gziphandler/gzip_go18.go | 43 + vendor/github.com/blang/semver/.travis.yml | 21 + vendor/github.com/blang/semver/README.md | 5 +- vendor/github.com/blang/semver/package.json | 2 +- vendor/github.com/coreos/go-semver/LICENSE | 202 + vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + vendor/github.com/coreos/go-systemd/LICENSE | 191 + vendor/github.com/coreos/go-systemd/NOTICE | 5 + .../coreos/go-systemd/daemon/sdnotify.go | 84 + .../coreos/go-systemd/daemon/watchdog.go | 73 + .../coreos/go-systemd/journal/journal.go | 225 + vendor/github.com/coreos/pkg/LICENSE | 202 + vendor/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 245 + .../coreos/pkg/capnslog/pkg_logger.go | 191 + .../coreos/pkg/capnslog/syslog_formatter.go | 65 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 +++ .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + vendor/github.com/golang/groupcache/LICENSE | 191 + .../github.com/golang/groupcache/lru/lru.go | 133 + .../golang/protobuf/proto/text_decode.go | 2 +- .../github.com/google/go-cmp/cmp/compare.go | 83 +- .../google/go-cmp/cmp/export_panic.go | 2 +- .../google/go-cmp/cmp/export_unsafe.go | 20 +- .../google/go-cmp/cmp/internal/diff/diff.go | 22 +- .../google/go-cmp/cmp/internal/value/name.go | 157 + .../cmp/internal/value/pointer_purego.go | 10 + .../cmp/internal/value/pointer_unsafe.go | 10 + .../github.com/google/go-cmp/cmp/options.go | 5 +- vendor/github.com/google/go-cmp/cmp/path.go | 7 +- vendor/github.com/google/go-cmp/cmp/report.go | 5 +- .../google/go-cmp/cmp/report_compare.go | 200 +- .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 292 +- .../google/go-cmp/cmp/report_slices.go | 135 +- .../google/go-cmp/cmp/report_text.go | 86 +- vendor/github.com/google/uuid/README.md | 2 +- vendor/github.com/google/uuid/marshal.go | 7 +- vendor/github.com/google/uuid/version1.go | 12 +- vendor/github.com/google/uuid/version4.go | 7 +- .../go-grpc-prometheus/.gitignore | 201 + .../go-grpc-prometheus/.travis.yml | 25 + .../go-grpc-prometheus/CHANGELOG.md | 24 + .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 + .../go-grpc-prometheus/README.md | 247 + .../go-grpc-prometheus/client.go | 39 + .../go-grpc-prometheus/client_metrics.go | 170 + .../go-grpc-prometheus/client_reporter.go | 46 + .../go-grpc-prometheus/makefile | 16 + .../go-grpc-prometheus/metric_options.go | 41 + .../go-grpc-prometheus/server.go | 48 + .../go-grpc-prometheus/server_metrics.go | 185 + .../go-grpc-prometheus/server_reporter.go | 46 + .../grpc-ecosystem/go-grpc-prometheus/util.go | 50 + 
vendor/github.com/munnerz/goautoneg/LICENSE | 31 + vendor/github.com/munnerz/goautoneg/Makefile | 13 + .../github.com/munnerz/goautoneg/README.txt | 67 + .../github.com/munnerz/goautoneg/autoneg.go | 189 + .../client_golang/prometheus/testutil/lint.go | 46 + .../prometheus/testutil/promlint/promlint.go | 386 + .../prometheus/testutil/testutil.go | 230 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../github.com/prometheus/procfs/cpuinfo.go | 44 + .../{cpuinfo_arm.go => cpuinfo_armx.go} | 1 + .../prometheus/procfs/cpuinfo_mipsle.go | 18 - .../{cpuinfo_mips64le.go => cpuinfo_mipsx.go} | 1 + .../{cpuinfo_arm64.go => cpuinfo_others.go} | 4 +- .../prometheus/procfs/cpuinfo_ppc64le.go | 18 - .../{cpuinfo_ppc64.go => cpuinfo_ppcx.go} | 1 + .../{cpuinfo_default.go => cpuinfo_x86.go} | 0 .../prometheus/procfs/fixtures.ttar | 75 +- .../prometheus/procfs/kernel_random.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 5 +- .../prometheus/procfs/proc_status.go | 6 +- vendor/go.etcd.io/etcd/LICENSE | 202 + vendor/go.etcd.io/etcd/NOTICE | 5 + vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go | 977 + vendor/go.etcd.io/etcd/auth/authpb/auth.proto | 42 + vendor/go.etcd.io/etcd/clientv3/README.md | 85 + vendor/go.etcd.io/etcd/clientv3/auth.go | 242 + .../etcd/clientv3/balancer/balancer.go | 293 + .../balancer/connectivity/connectivity.go | 93 + .../etcd/clientv3/balancer/picker/doc.go | 16 + .../etcd/clientv3/balancer/picker/err.go | 39 + .../etcd/clientv3/balancer/picker/picker.go | 91 + .../balancer/picker/roundrobin_balanced.go | 95 + .../balancer/resolver/endpoint/endpoint.go | 247 + .../etcd/clientv3/balancer/utils.go | 68 + vendor/go.etcd.io/etcd/clientv3/client.go | 664 + vendor/go.etcd.io/etcd/clientv3/cluster.go | 141 + vendor/go.etcd.io/etcd/clientv3/compact_op.go | 51 + vendor/go.etcd.io/etcd/clientv3/compare.go | 140 + vendor/go.etcd.io/etcd/clientv3/config.go | 88 + .../etcd/clientv3/credentials/credentials.go | 173 + vendor/go.etcd.io/etcd/clientv3/ctx.go | 64 + vendor/go.etcd.io/etcd/clientv3/doc.go | 106 + vendor/go.etcd.io/etcd/clientv3/kv.go | 177 + vendor/go.etcd.io/etcd/clientv3/lease.go | 596 + vendor/go.etcd.io/etcd/clientv3/logger.go | 101 + .../go.etcd.io/etcd/clientv3/maintenance.go | 243 + vendor/go.etcd.io/etcd/clientv3/op.go | 560 + vendor/go.etcd.io/etcd/clientv3/options.go | 65 + vendor/go.etcd.io/etcd/clientv3/retry.go | 298 + .../etcd/clientv3/retry_interceptor.go | 392 + vendor/go.etcd.io/etcd/clientv3/sort.go | 37 + vendor/go.etcd.io/etcd/clientv3/txn.go | 151 + vendor/go.etcd.io/etcd/clientv3/utils.go | 49 + vendor/go.etcd.io/etcd/clientv3/watch.go | 1035 + .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 16 + .../etcdserver/api/v3rpc/rpctypes/error.go | 235 + .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 22 + .../api/v3rpc/rpctypes/metadatafields.go | 20 + .../etcdserver/etcdserverpb/etcdserver.pb.go | 1041 + .../etcdserver/etcdserverpb/etcdserver.proto | 34 + .../etcdserverpb/raft_internal.pb.go | 2127 ++ .../etcdserverpb/raft_internal.proto | 75 + .../etcdserverpb/raft_internal_stringer.go | 183 + .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 20088 ++++++++++++++++ .../etcd/etcdserver/etcdserverpb/rpc.proto | 1146 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go | 718 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto | 49 + .../go.etcd.io/etcd/pkg/fileutil/dir_unix.go | 27 + .../etcd/pkg/fileutil/dir_windows.go | 51 + vendor/go.etcd.io/etcd/pkg/fileutil/doc.go | 16 + .../go.etcd.io/etcd/pkg/fileutil/fileutil.go | 129 + vendor/go.etcd.io/etcd/pkg/fileutil/lock.go | 26 + 
.../etcd/pkg/fileutil/lock_flock.go | 49 + .../etcd/pkg/fileutil/lock_linux.go | 97 + .../etcd/pkg/fileutil/lock_plan9.go | 45 + .../etcd/pkg/fileutil/lock_solaris.go | 62 + .../go.etcd.io/etcd/pkg/fileutil/lock_unix.go | 29 + .../etcd/pkg/fileutil/lock_windows.go | 125 + .../etcd/pkg/fileutil/preallocate.go | 54 + .../etcd/pkg/fileutil/preallocate_darwin.go | 65 + .../etcd/pkg/fileutil/preallocate_unix.go | 49 + .../pkg/fileutil/preallocate_unsupported.go | 25 + vendor/go.etcd.io/etcd/pkg/fileutil/purge.go | 98 + .../go.etcd.io/etcd/pkg/fileutil/read_dir.go | 70 + vendor/go.etcd.io/etcd/pkg/fileutil/sync.go | 29 + .../etcd/pkg/fileutil/sync_darwin.go | 40 + .../etcd/pkg/fileutil/sync_linux.go | 34 + .../etcd/pkg/logutil/discard_logger.go | 46 + .../etcd/pkg/logutil/doc.go} | 12 +- .../go.etcd.io/etcd/pkg/logutil/log_level.go | 70 + vendor/go.etcd.io/etcd/pkg/logutil/logger.go | 64 + .../etcd/pkg/logutil/merge_logger.go | 194 + .../etcd/pkg/logutil/package_logger.go | 60 + vendor/go.etcd.io/etcd/pkg/logutil/zap.go | 91 + .../go.etcd.io/etcd/pkg/logutil/zap_grpc.go | 111 + .../etcd/pkg/logutil/zap_journal.go | 92 + .../go.etcd.io/etcd/pkg/logutil/zap_raft.go | 102 + .../etcd/pkg/systemd/doc.go} | 12 +- vendor/go.etcd.io/etcd/pkg/systemd/journal.go | 29 + .../etcd/pkg/tlsutil/cipher_suites.go | 51 + vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go | 16 + vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go | 73 + vendor/go.etcd.io/etcd/pkg/transport/doc.go | 17 + .../etcd/pkg/transport/keepalive_listener.go | 94 + .../etcd/pkg/transport/limit_listen.go | 80 + .../go.etcd.io/etcd/pkg/transport/listener.go | 449 + .../etcd/pkg/transport/listener_tls.go | 272 + .../etcd/pkg/transport/timeout_conn.go | 44 + .../etcd/pkg/transport/timeout_dialer.go | 36 + .../etcd/pkg/transport/timeout_listener.go | 57 + .../etcd/pkg/transport/timeout_transport.go | 51 + vendor/go.etcd.io/etcd/pkg/transport/tls.go | 49 + .../etcd/pkg/transport/transport.go | 71 + .../etcd/pkg/transport/unix_listener.go | 40 + vendor/go.etcd.io/etcd/pkg/types/doc.go | 17 + vendor/go.etcd.io/etcd/pkg/types/id.go | 39 + vendor/go.etcd.io/etcd/pkg/types/set.go | 195 + vendor/go.etcd.io/etcd/pkg/types/slice.go | 22 + vendor/go.etcd.io/etcd/pkg/types/urls.go | 82 + vendor/go.etcd.io/etcd/pkg/types/urlsmap.go | 107 + vendor/go.etcd.io/etcd/raft/OWNERS | 19 + vendor/go.etcd.io/etcd/raft/README.md | 197 + vendor/go.etcd.io/etcd/raft/bootstrap.go | 80 + .../etcd/raft/confchange/confchange.go | 425 + .../etcd/raft/confchange/restore.go | 155 + vendor/go.etcd.io/etcd/raft/design.md | 57 + vendor/go.etcd.io/etcd/raft/doc.go | 300 + vendor/go.etcd.io/etcd/raft/log.go | 372 + vendor/go.etcd.io/etcd/raft/log_unstable.go | 157 + vendor/go.etcd.io/etcd/raft/logger.go | 132 + vendor/go.etcd.io/etcd/raft/node.go | 584 + vendor/go.etcd.io/etcd/raft/quorum/joint.go | 75 + .../go.etcd.io/etcd/raft/quorum/majority.go | 210 + vendor/go.etcd.io/etcd/raft/quorum/quorum.go | 58 + .../etcd/raft/quorum/voteresult_string.go | 26 + vendor/go.etcd.io/etcd/raft/raft.go | 1656 ++ .../go.etcd.io/etcd/raft/raftpb/confchange.go | 170 + .../go.etcd.io/etcd/raft/raftpb/confstate.go | 45 + vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go | 2646 ++ vendor/go.etcd.io/etcd/raft/raftpb/raft.proto | 177 + vendor/go.etcd.io/etcd/raft/rawnode.go | 239 + vendor/go.etcd.io/etcd/raft/read_only.go | 121 + vendor/go.etcd.io/etcd/raft/status.go | 106 + vendor/go.etcd.io/etcd/raft/storage.go | 273 + .../go.etcd.io/etcd/raft/tracker/inflights.go | 132 + .../go.etcd.io/etcd/raft/tracker/progress.go | 
259 + vendor/go.etcd.io/etcd/raft/tracker/state.go | 42 + .../go.etcd.io/etcd/raft/tracker/tracker.go | 288 + vendor/go.etcd.io/etcd/raft/util.go | 233 + vendor/go.etcd.io/etcd/version/version.go | 56 + vendor/go.uber.org/atomic/.codecov.yml | 15 + vendor/go.uber.org/atomic/.gitignore | 11 + vendor/go.uber.org/atomic/.travis.yml | 27 + vendor/go.uber.org/atomic/LICENSE.txt | 19 + vendor/go.uber.org/atomic/Makefile | 51 + vendor/go.uber.org/atomic/README.md | 36 + vendor/go.uber.org/atomic/atomic.go | 351 + vendor/go.uber.org/atomic/error.go | 55 + vendor/go.uber.org/atomic/glide.lock | 17 + vendor/go.uber.org/atomic/glide.yaml | 6 + vendor/go.uber.org/atomic/string.go | 49 + vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 1 + vendor/go.uber.org/multierr/.travis.yml | 33 + vendor/go.uber.org/multierr/CHANGELOG.md | 28 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 74 + vendor/go.uber.org/multierr/README.md | 23 + vendor/go.uber.org/multierr/error.go | 401 + vendor/go.uber.org/multierr/glide.lock | 19 + vendor/go.uber.org/multierr/glide.yaml | 8 + vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 28 + vendor/go.uber.org/zap/.readme.tmpl | 108 + vendor/go.uber.org/zap/.travis.yml | 21 + vendor/go.uber.org/zap/CHANGELOG.md | 327 + vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 + vendor/go.uber.org/zap/CONTRIBUTING.md | 81 + vendor/go.uber.org/zap/FAQ.md | 155 + vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 76 + vendor/go.uber.org/zap/README.md | 136 + vendor/go.uber.org/zap/array.go | 320 + vendor/go.uber.org/zap/buffer/buffer.go | 115 + vendor/go.uber.org/zap/buffer/pool.go | 49 + vendor/go.uber.org/zap/check_license.sh | 17 + vendor/go.uber.org/zap/config.go | 243 + vendor/go.uber.org/zap/doc.go | 113 + vendor/go.uber.org/zap/encoder.go | 75 + vendor/go.uber.org/zap/error.go | 80 + vendor/go.uber.org/zap/field.go | 310 + vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.lock | 76 + vendor/go.uber.org/zap/glide.yaml | 35 + vendor/go.uber.org/zap/global.go | 168 + vendor/go.uber.org/zap/global_go112.go | 26 + vendor/go.uber.org/zap/global_prego112.go | 26 + vendor/go.uber.org/zap/http_handler.go | 81 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 64 + vendor/go.uber.org/zap/level.go | 132 + vendor/go.uber.org/zap/logger.go | 305 + vendor/go.uber.org/zap/options.go | 109 + vendor/go.uber.org/zap/sink.go | 161 + vendor/go.uber.org/zap/stacktrace.go | 126 + vendor/go.uber.org/zap/sugar.go | 304 + vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 99 + .../zap/zapcore/console_encoder.go | 147 + vendor/go.uber.org/zap/zapcore/core.go | 113 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 348 + vendor/go.uber.org/zap/zapcore/entry.go | 257 + vendor/go.uber.org/zap/zapcore/error.go | 120 + vendor/go.uber.org/zap/zapcore/field.go | 212 + vendor/go.uber.org/zap/zapcore/hook.go | 68 + .../go.uber.org/zap/zapcore/json_encoder.go | 505 + vendor/go.uber.org/zap/zapcore/level.go | 175 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 53 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 + vendor/go.uber.org/zap/zapcore/sampler.go | 134 + vendor/go.uber.org/zap/zapcore/tee.go | 81 + .../go.uber.org/zap/zapcore/write_syncer.go | 123 + 
vendor/golang.org/x/crypto/cryptobyte/asn1.go | 752 + .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 337 + .../golang.org/x/crypto/cryptobyte/string.go | 161 + .../x/crypto/internal/subtle/aliasing.go | 32 + .../internal/subtle/aliasing_appengine.go | 35 + .../x/crypto/nacl/secretbox/secretbox.go | 173 + .../x/crypto/poly1305/bits_compat.go | 39 + .../x/crypto/poly1305/bits_go1.13.go | 21 + .../golang.org/x/crypto/poly1305/mac_noasm.go | 9 + .../golang.org/x/crypto/poly1305/poly1305.go | 99 + .../golang.org/x/crypto/poly1305/sum_amd64.go | 47 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 108 + .../x/crypto/poly1305/sum_generic.go | 310 + .../x/crypto/poly1305/sum_ppc64le.go | 47 + .../x/crypto/poly1305/sum_ppc64le.s | 181 + .../golang.org/x/crypto/poly1305/sum_s390x.go | 75 + .../golang.org/x/crypto/poly1305/sum_s390x.s | 503 + .../x/crypto/salsa20/salsa/hsalsa20.go | 144 + .../x/crypto/salsa20/salsa/salsa208.go | 199 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 883 + .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 231 + vendor/golang.org/x/net/http2/server.go | 12 +- vendor/golang.org/x/net/http2/transport.go | 36 +- .../idna/{tables12.00.go => tables12.0.0.go} | 2 +- vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 ++++ vendor/golang.org/x/oauth2/README.md | 13 +- vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 65 + vendor/golang.org/x/sys/cpu/cpu.go | 287 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 32 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 + .../x/sys/cpu/cpu_gc_arm64.go} | 9 +- vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 + .../cpu_gccgo_arm64.go} | 10 +- .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 26 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 23 + .../cpu_linux_noinit.go} | 6 +- .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 31 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../cpu_other_arm64.go} | 9 +- .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 12 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 135 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 + .../x/sys/unix/asm_openbsd_mips64.s | 29 + vendor/golang.org/x/sys/unix/fcntl_darwin.go | 6 + .../x/sys/unix/fcntl_linux_32bit.go | 4 +- vendor/golang.org/x/sys/unix/gccgo.go | 2 - vendor/golang.org/x/sys/unix/gccgo_c.c | 6 + 
vendor/golang.org/x/sys/unix/ioctl.go | 9 + vendor/golang.org/x/sys/unix/mkall.sh | 15 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 24 +- .../x/sys/unix/sockcmsg_unix_other.go | 6 +- vendor/golang.org/x/sys/unix/syscall.go | 43 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 16 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 36 +- .../x/sys/unix/syscall_darwin.1_12.go | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 141 +- .../x/sys/unix/syscall_darwin_386.go | 11 +- .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 - .../x/sys/unix/syscall_darwin_amd64.go | 11 +- .../x/sys/unix/syscall_darwin_arm.go | 8 +- .../x/sys/unix/syscall_darwin_arm64.go | 13 +- .../x/sys/unix/syscall_dragonfly.go | 19 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 19 +- .../golang.org/x/sys/unix/syscall_illumos.go | 41 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 142 +- .../x/sys/unix/syscall_linux_386.go | 3 - .../x/sys/unix/syscall_linux_arm.go | 5 - .../x/sys/unix/syscall_linux_gc_arm.go | 13 + .../golang.org/x/sys/unix/syscall_netbsd.go | 19 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 19 +- .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../golang.org/x/sys/unix/syscall_solaris.go | 7 +- .../x/sys/unix/zerrors_darwin_386.go | 4 + .../x/sys/unix/zerrors_darwin_amd64.go | 4 + .../x/sys/unix/zerrors_darwin_arm.go | 4 + .../x/sys/unix/zerrors_darwin_arm64.go | 4 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 138 +- .../x/sys/unix/zerrors_freebsd_386.go | 6 + .../x/sys/unix/zerrors_freebsd_amd64.go | 6 + .../x/sys/unix/zerrors_freebsd_arm.go | 6 + .../x/sys/unix/zerrors_freebsd_arm64.go | 6 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 236 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 4 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zerrors_netbsd_386.go | 6 + .../x/sys/unix/zerrors_netbsd_amd64.go | 6 + .../x/sys/unix/zerrors_netbsd_arm.go | 6 + .../x/sys/unix/zerrors_netbsd_arm64.go | 6 + .../x/sys/unix/zerrors_openbsd_386.go | 7 + .../x/sys/unix/zerrors_openbsd_amd64.go | 7 + .../x/sys/unix/zerrors_openbsd_arm.go | 7 + .../x/sys/unix/zerrors_openbsd_arm64.go | 7 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1862 ++ .../x/sys/unix/zerrors_solaris_amd64.go | 22 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1809 -- .../x/sys/unix/zsyscall_darwin_386.go | 137 +- .../x/sys/unix/zsyscall_darwin_386.s | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1809 -- .../x/sys/unix/zsyscall_darwin_amd64.go | 137 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 18 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1782 -- .../x/sys/unix/zsyscall_darwin_arm.go | 107 +- .../x/sys/unix/zsyscall_darwin_arm.s | 14 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 122 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 16 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 32 +- .../x/sys/unix/zsyscall_illumos_amd64.go | 29 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 77 + ...m64.1_11.go => zsyscall_openbsd_mips64.go} | 496 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 279 + .../x/sys/unix/zsysnum_darwin_386.go | 1 + 
.../x/sys/unix/zsysnum_darwin_amd64.go | 1 + .../x/sys/unix/zsysnum_darwin_arm.go | 1 + .../x/sys/unix/zsysnum_darwin_arm64.go | 1 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 255 +- .../x/sys/unix/zsysnum_linux_386.go | 2 + .../x/sys/unix/zsysnum_linux_amd64.go | 2 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 2 + .../x/sys/unix/zsysnum_linux_mips.go | 2 + .../x/sys/unix/zsysnum_linux_mips64.go | 2 + .../x/sys/unix/zsysnum_linux_mips64le.go | 2 + .../x/sys/unix/zsysnum_linux_mipsle.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 + .../x/sys/unix/zsysnum_linux_s390x.go | 2 + .../x/sys/unix/zsysnum_linux_sparc64.go | 2 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 220 + .../x/sys/unix/ztypes_darwin_386.go | 32 +- .../x/sys/unix/ztypes_darwin_amd64.go | 43 +- .../x/sys/unix/ztypes_darwin_arm.go | 39 +- .../x/sys/unix/ztypes_darwin_arm64.go | 43 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 46 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 1102 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 20 + .../x/sys/unix/ztypes_linux_amd64.go | 23 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 23 + .../x/sys/unix/ztypes_linux_arm64.go | 23 + .../x/sys/unix/ztypes_linux_mips.go | 23 + .../x/sys/unix/ztypes_linux_mips64.go | 23 + .../x/sys/unix/ztypes_linux_mips64le.go | 23 + .../x/sys/unix/ztypes_linux_mipsle.go | 23 + .../x/sys/unix/ztypes_linux_ppc64.go | 23 + .../x/sys/unix/ztypes_linux_ppc64le.go | 23 + .../x/sys/unix/ztypes_linux_riscv64.go | 23 + .../x/sys/unix/ztypes_linux_s390x.go | 23 + .../x/sys/unix/ztypes_linux_sparc64.go | 23 + .../x/sys/unix/ztypes_openbsd_mips64.go | 565 + .../x/sys/unix/ztypes_solaris_amd64.go | 31 +- .../x/sys/windows/memory_windows.go | 20 +- vendor/golang.org/x/sys/windows/service.go | 2 + .../x/sys/windows/setupapierrors_windows.go | 100 + vendor/golang.org/x/sys/windows/syscall.go | 46 +- .../x/sys/windows/syscall_windows.go | 33 +- .../golang.org/x/sys/windows/types_windows.go | 12 - .../x/sys/windows/types_windows_386.go | 13 + .../x/sys/windows/types_windows_amd64.go | 12 + .../x/sys/windows/types_windows_arm.go | 13 + .../x/sys/windows/zsyscall_windows.go | 4397 ++-- .../x/text/unicode/bidi/tables12.0.0.go | 2 +- .../x/text/unicode/bidi/tables13.0.0.go | 1955 ++ .../x/text/unicode/norm/tables12.0.0.go | 2 +- .../x/text/unicode/norm/tables13.0.0.go | 7760 ++++++ .../golang.org/x/text/width/tables12.0.0.go | 2 +- .../golang.org/x/text/width/tables13.0.0.go | 1351 ++ vendor/golang.org/x/time/rate/rate.go | 12 +- .../googleapis/rpc/status/status.pb.go | 14 +- .../grpc/balancer/base/balancer.go | 67 +- .../grpc/resolver/dns/dns_resolver.go | 36 + .../grpc/resolver/passthrough/passthrough.go | 26 + vendor/google.golang.org/grpc/version.go | 2 +- .../protobuf/encoding/prototext/decode.go | 65 +- .../protobuf/encoding/prototext/encode.go | 12 +- .../protobuf/internal/fieldnum/any_gen.go | 13 - .../protobuf/internal/fieldnum/api_gen.go | 35 - .../internal/fieldnum/descriptor_gen.go | 240 - .../protobuf/internal/fieldnum/doc.go | 7 - .../internal/fieldnum/duration_gen.go | 13 - .../internal/fieldnum/field_mask_gen.go | 12 - .../internal/fieldnum/source_context_gen.go | 12 - .../protobuf/internal/fieldnum/struct_gen.go | 33 - .../internal/fieldnum/timestamp_gen.go | 13 - .../protobuf/internal/fieldnum/type_gen.go | 53 - .../internal/fieldnum/wrappers_gen.go | 52 - .../protobuf/internal/filedesc/build.go | 16 +- 
.../protobuf/internal/filedesc/desc.go | 5 +- .../protobuf/internal/filedesc/desc_init.go | 62 +- .../protobuf/internal/filedesc/desc_lazy.go | 124 +- .../protobuf/internal/filedesc/desc_list.go | 6 +- .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/genname/name.go | 25 - .../protobuf/internal/impl/api_export.go | 7 + .../protobuf/internal/impl/codec_map.go | 5 +- .../protobuf/internal/impl/message.go | 10 +- .../protobuf/internal/impl/validate.go | 5 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 5 +- .../protobuf/reflect/protoreflect/proto.go | 50 +- .../protobuf/types/known/anypb/any.pb.go | 207 + .../types/known/durationpb/duration.pb.go | 130 + .../types/known/timestamppb/timestamp.pb.go | 110 + .../natefinch/lumberjack.v2/.gitignore | 23 + .../natefinch/lumberjack.v2/.travis.yml | 6 + .../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 + .../natefinch/lumberjack.v2/README.md | 179 + .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 + .../natefinch/lumberjack.v2/chown_linux.go | 19 + .../natefinch/lumberjack.v2/lumberjack.go | 541 + vendor/k8s.io/api/admission/v1/doc.go | 23 + .../k8s.io/api/admission/v1/generated.pb.go | 1792 ++ .../k8s.io/api/admission/v1/generated.proto | 167 + vendor/k8s.io/api/admission/v1/register.go | 51 + vendor/k8s.io/api/admission/v1/types.go | 169 + .../v1/types_swagger_doc_generated.go | 78 + .../api/admission/v1/zz_generated.deepcopy.go | 141 + vendor/k8s.io/api/admission/v1beta1/doc.go | 24 + .../api/admission/v1beta1/generated.pb.go | 1792 ++ .../api/admission/v1beta1/generated.proto | 167 + .../v1beta1}/register.go | 11 +- vendor/k8s.io/api/admission/v1beta1/types.go | 174 + .../v1beta1/types_swagger_doc_generated.go | 78 + .../v1beta1/zz_generated.deepcopy.go | 141 + .../zz_generated.prerelease-lifecycle.go | 49 + .../admissionregistration/v1/generated.proto | 2 +- .../v1beta1/generated.proto | 2 +- .../api/apiserverinternal/v1alpha1/doc.go | 25 + .../v1alpha1/generated.pb.go | 1718 ++ .../v1alpha1/generated.proto | 121 + .../apiserverinternal/v1alpha1/register.go | 48 + .../api/apiserverinternal/v1alpha1/types.go | 127 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../v1alpha1/zz_generated.deepcopy.go | 175 + vendor/k8s.io/api/apps/v1/generated.proto | 2 +- .../k8s.io/api/apps/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/apps/v1beta1/types.go | 16 +- .../zz_generated.prerelease-lifecycle.go | 16 +- .../k8s.io/api/apps/v1beta2/generated.proto | 2 +- vendor/k8s.io/api/apps/v1beta2/types.go | 22 +- .../zz_generated.prerelease-lifecycle.go | 22 +- .../api/authentication/v1/generated.proto | 2 +- .../authentication/v1beta1/generated.proto | 2 +- .../api/authorization/v1/generated.proto | 2 +- .../api/authorization/v1beta1/generated.proto | 2 +- .../k8s.io/api/autoscaling/v1/generated.pb.go | 937 +- 
.../k8s.io/api/autoscaling/v1/generated.proto | 85 +- vendor/k8s.io/api/autoscaling/v1/types.go | 81 +- .../v1/types_swagger_doc_generated.go | 50 +- .../autoscaling/v1/zz_generated.deepcopy.go | 58 + .../api/autoscaling/v2beta1/generated.pb.go | 942 +- .../api/autoscaling/v2beta1/generated.proto | 85 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 81 +- .../v2beta1/types_swagger_doc_generated.go | 50 +- .../v2beta1/zz_generated.deepcopy.go | 58 + .../api/autoscaling/v2beta2/generated.pb.go | 913 +- .../api/autoscaling/v2beta2/generated.proto | 65 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 65 +- .../v2beta2/types_swagger_doc_generated.go | 48 +- .../v2beta2/zz_generated.deepcopy.go | 44 + vendor/k8s.io/api/batch/v1/generated.proto | 4 +- vendor/k8s.io/api/batch/v1/types.go | 2 + .../batch/v1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/batch/v1beta1/generated.proto | 2 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 2 +- .../api/certificates/v1/generated.proto | 2 +- .../api/certificates/v1beta1/generated.proto | 2 +- .../api/coordination/v1/generated.proto | 2 +- .../api/coordination/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 2671 +- vendor/k8s.io/api/core/v1/generated.proto | 228 +- vendor/k8s.io/api/core/v1/resource.go | 51 +- vendor/k8s.io/api/core/v1/types.go | 267 +- .../core/v1/types_swagger_doc_generated.go | 57 +- .../k8s.io/api/core/v1/well_known_labels.go | 14 +- .../api/core/v1/zz_generated.deepcopy.go | 64 +- .../api/discovery/v1alpha1/generated.pb.go | 215 +- .../api/discovery/v1alpha1/generated.proto | 31 +- vendor/k8s.io/api/discovery/v1alpha1/types.go | 28 +- .../v1alpha1/types_swagger_doc_generated.go | 11 +- .../v1alpha1/zz_generated.deepcopy.go | 15 + .../api/discovery/v1beta1/generated.pb.go | 214 +- .../api/discovery/v1beta1/generated.proto | 31 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 34 +- .../v1beta1/types_swagger_doc_generated.go | 11 +- .../v1beta1/zz_generated.deepcopy.go | 15 + vendor/k8s.io/api/events/v1/generated.proto | 18 +- vendor/k8s.io/api/events/v1/types.go | 19 +- .../events/v1/types_swagger_doc_generated.go | 8 +- .../k8s.io/api/events/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/events/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 24 +- .../zz_generated.prerelease-lifecycle.go | 24 +- vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go | 1 + .../api/flowcontrol/v1alpha1/generated.proto | 2 +- .../k8s.io/api/flowcontrol/v1alpha1/types.go | 22 + .../zz_generated.prerelease-lifecycle.go | 121 + vendor/k8s.io/api/flowcontrol/v1beta1/doc.go | 25 + .../api/flowcontrol/v1beta1/generated.pb.go | 5433 +++++ .../api/flowcontrol/v1beta1/generated.proto | 434 + .../api/flowcontrol/v1beta1/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta1/types.go | 529 + .../v1beta1/types_swagger_doc_generated.go | 258 + .../v1beta1/zz_generated.deepcopy.go | 541 + .../zz_generated.prerelease-lifecycle.go | 93 + .../k8s.io/api/networking/v1/generated.proto | 2 +- .../api/networking/v1beta1/generated.proto | 2 +- .../api/{settings/v1alpha1 => node/v1}/doc.go | 6 +- vendor/k8s.io/api/node/v1/generated.pb.go | 1411 ++ vendor/k8s.io/api/node/v1/generated.proto | 109 + vendor/k8s.io/api/node/v1/register.go | 52 + vendor/k8s.io/api/node/v1/types.go | 107 + .../node/v1/types_swagger_doc_generated.go | 71 + .../v1}/zz_generated.deepcopy.go | 99 +- .../k8s.io/api/node/v1alpha1/generated.proto | 6 +- 
vendor/k8s.io/api/node/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/node/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/node/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/rbac/v1/generated.proto | 2 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 2 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 2 +- .../k8s.io/api/scheduling/v1/generated.proto | 2 +- .../api/scheduling/v1alpha1/generated.proto | 2 +- .../api/scheduling/v1beta1/generated.proto | 2 +- .../api/settings/v1alpha1/generated.pb.go | 1053 - .../api/settings/v1alpha1/generated.proto | 75 - vendor/k8s.io/api/settings/v1alpha1/types.go | 70 - .../v1alpha1/types_swagger_doc_generated.go | 61 - vendor/k8s.io/api/storage/v1/generated.pb.go | 483 +- vendor/k8s.io/api/storage/v1/generated.proto | 52 +- vendor/k8s.io/api/storage/v1/types.go | 51 + .../storage/v1/types_swagger_doc_generated.go | 12 + .../api/storage/v1/zz_generated.deepcopy.go | 33 + .../api/storage/v1alpha1/generated.proto | 2 +- .../api/storage/v1beta1/generated.pb.go | 486 +- .../api/storage/v1beta1/generated.proto | 52 +- vendor/k8s.io/api/storage/v1beta1/types.go | 51 + .../v1beta1/types_swagger_doc_generated.go | 12 + .../storage/v1beta1/zz_generated.deepcopy.go | 33 + .../apimachinery/pkg/api/meta/conditions.go | 1 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 4 - .../apimachinery/pkg/api/meta/restmapper.go | 3 + .../pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/quantity.go | 36 +- .../apimachinery/pkg/api/validation/doc.go | 18 + .../pkg/api/validation/generic.go | 86 + .../pkg/api/validation/objectmeta.go | 268 + .../internalversion/validation/validation.go | 46 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/generated.proto | 3 +- .../pkg/apis/meta/v1/group_version.go | 9 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 14 + .../pkg/apis/meta/v1/micro_time.go | 15 - .../pkg/apis/meta/v1/micro_time_fuzz.go | 39 + .../apimachinery/pkg/apis/meta/v1/time.go | 15 - .../pkg/apis/meta/v1/time_fuzz.go | 39 + .../apimachinery/pkg/apis/meta/v1/types.go | 1 + .../pkg/apis/meta/v1/unstructured/helpers.go | 8 - .../pkg/apis/meta/v1beta1/generated.proto | 2 +- .../meta/v1beta1/validation/validation.go | 33 + .../apimachinery/pkg/conversion/converter.go | 598 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 19 - .../apimachinery/pkg/labels/selector.go | 50 +- .../apimachinery/pkg/runtime/converter.go | 3 + .../apimachinery/pkg/runtime/generated.proto | 2 +- .../apimachinery/pkg/runtime/interfaces.go | 2 +- .../apimachinery/pkg/runtime/negotiate.go | 33 - .../pkg/runtime/schema/generated.proto | 2 +- .../pkg/runtime/schema/interfaces.go | 4 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 30 +- .../pkg/runtime/serializer/codec_factory.go | 12 +- .../pkg/runtime/serializer/json/json.go | 23 +- .../runtime/serializer/protobuf/protobuf.go | 4 + .../apimachinery/pkg/types/namespacedname.go | 6 +- .../apimachinery/pkg/util/cache/expiring.go | 2 +- .../apimachinery/pkg/util/clock/clock.go | 18 +- .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 3 + .../pkg/util/intstr/generated.proto | 2 +- .../pkg/util/intstr/instr_fuzz.go | 42 + .../apimachinery/pkg/util/intstr/intstr.go | 70 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 33 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 51 +- .../apimachinery/pkg/util/net/port_range.go | 2 +- 
.../apimachinery/pkg/util/runtime/runtime.go | 4 +- .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 27 + .../pkg/util/validation/field/path.go | 3 + .../pkg/util/validation/validation.go | 6 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 29 + .../apimachinery/pkg/util/waitgroup/doc.go | 19 + .../pkg/util/waitgroup/waitgroup.go | 57 + .../apimachinery/pkg/util/yaml/decoder.go | 31 + vendor/k8s.io/apimachinery/pkg/watch/mux.go | 65 +- .../apimachinery/pkg/watch/streamwatcher.go | 2 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 2 +- .../apiserver/pkg/admission/attributes.go | 211 + .../k8s.io/apiserver/pkg/admission/audit.go | 103 + .../k8s.io/apiserver/pkg/admission/chain.go | 70 + .../k8s.io/apiserver/pkg/admission/config.go | 175 + .../configuration/configuration_manager.go | 166 + .../configuration/mutating_webhook_manager.go | 106 + .../validating_webhook_manager.go | 104 + .../apiserver/pkg/admission/decorator.go | 39 + .../k8s.io/apiserver/pkg/admission/errors.go | 72 + .../k8s.io/apiserver/pkg/admission/handler.go | 79 + .../pkg/admission/initializer/initializer.go | 72 + .../pkg/admission/initializer/interfaces.go | 61 + .../apiserver/pkg/admission/interfaces.go | 172 + .../pkg/admission/metrics/metrics.go | 251 + .../plugin/namespace/lifecycle/admission.go | 232 + .../pkg/admission/plugin/webhook/accessors.go | 297 + .../config/apis/webhookadmission/doc.go | 19 + .../config/apis/webhookadmission/register.go | 53 + .../config/apis/webhookadmission/types.go | 29 + .../config/apis/webhookadmission/v1/doc.go | 23 + .../apis/webhookadmission/v1/register.go | 50 + .../config/apis/webhookadmission/v1/types.go | 29 + .../v1/zz_generated.conversion.go | 67 + .../v1/zz_generated.deepcopy.go | 50 + .../v1/zz_generated.defaults.go | 32 + .../apis/webhookadmission/v1alpha1/doc.go | 23 + .../webhookadmission/v1alpha1/register.go | 50 + .../apis/webhookadmission/v1alpha1/types.go | 29 + .../v1alpha1/zz_generated.conversion.go | 67 + .../v1alpha1/zz_generated.deepcopy.go | 50 + .../v1alpha1/zz_generated.defaults.go | 32 + .../webhookadmission/zz_generated.deepcopy.go | 50 + .../plugin/webhook/config/kubeconfig.go | 71 + .../admission/plugin/webhook/errors/doc.go | 18 + .../plugin/webhook/errors/statuserror.go | 63 + .../plugin/webhook/generic/conversion.go | 112 + .../plugin/webhook/generic/interfaces.go | 75 + .../plugin/webhook/generic/webhook.go | 230 + .../plugin/webhook/mutating/dispatcher.go | 430 + .../admission/plugin/webhook/mutating/doc.go | 19 + .../plugin/webhook/mutating/plugin.go | 76 + .../webhook/mutating/reinvocationcontext.go | 68 + .../admission/plugin/webhook/namespace/doc.go | 20 + .../plugin/webhook/namespace/matcher.go | 121 + .../admission/plugin/webhook/object/doc.go | 20 + .../plugin/webhook/object/matcher.go | 57 + .../plugin/webhook/request/admissionreview.go | 283 + .../admission/plugin/webhook/request/doc.go | 18 + .../admission/plugin/webhook/rules/rules.go | 129 + .../plugin/webhook/validating/dispatcher.go | 238 + .../plugin/webhook/validating/doc.go | 19 + .../plugin/webhook/validating/plugin.go | 67 + .../k8s.io/apiserver/pkg/admission/plugins.go | 208 + .../apiserver/pkg/admission/reinvocation.go | 64 + vendor/k8s.io/apiserver/pkg/admission/util.go | 47 + .../apiserver/pkg/apis/apiserver/types.go | 3 +- .../pkg/apis/apiserver/v1alpha1/types.go | 3 +- .../pkg/apis/apiserver/v1beta1/types.go | 3 +- .../pkg/apis/audit/install/install.go | 37 + .../pkg/apis/audit/v1/generated.proto | 2 +- .../pkg/apis/audit/v1alpha1/generated.proto | 2 +- 
.../pkg/apis/audit/v1beta1/generated.proto | 2 +- .../pkg/apis/audit/validation/validation.go | 133 + .../k8s.io/apiserver/pkg/apis/config/doc.go | 19 + .../apiserver/pkg/apis/config/register.go | 53 + .../k8s.io/apiserver/pkg/apis/config/types.go | 100 + .../apiserver/pkg/apis/config/v1/defaults.go | 44 + .../apiserver/pkg/apis/config/v1/doc.go | 23 + .../apiserver/pkg/apis/config/v1/register.go | 53 + .../apiserver/pkg/apis/config/v1/types.go | 100 + .../apis/config/v1/zz_generated.conversion.go | 296 + .../apis/config/v1/zz_generated.deepcopy.go | 227 + .../apis/config/v1/zz_generated.defaults.go | 45 + .../pkg/apis/config/validation/validation.go | 220 + .../pkg/apis/config/zz_generated.deepcopy.go | 227 + .../pkg/apis/flowcontrol/bootstrap/default.go | 475 + .../apiserver/pkg/audit/policy/checker.go | 219 + .../apiserver/pkg/audit/policy/enforce.go | 53 + .../apiserver/pkg/audit/policy/reader.go | 90 + .../k8s.io/apiserver/pkg/audit/policy/util.go | 68 + vendor/k8s.io/apiserver/pkg/audit/request.go | 4 +- .../authenticatorfactory/delegating.go | 11 +- .../request/bearertoken/bearertoken.go | 2 +- .../pkg/authentication/request/x509/x509.go | 29 +- .../pkg/authentication/serviceaccount/util.go | 183 + .../apiserver/pkg/authentication/user/user.go | 1 + .../authorizerfactory/delegating.go | 12 + .../apiserver/pkg/authorization/path/doc.go | 18 + .../apiserver/pkg/authorization/path/path.go | 67 + .../pkg/authorization/union/union.go | 106 + .../pkg/endpoints/deprecation/deprecation.go | 133 + .../pkg/endpoints/discovery/addresses.go | 72 + .../pkg/endpoints/discovery/group.go | 73 + .../pkg/endpoints/discovery/legacy.go | 76 + .../apiserver/pkg/endpoints/discovery/root.go | 135 + .../endpoints/discovery/storageversionhash.go | 40 + .../apiserver/pkg/endpoints/discovery/util.go | 110 + .../pkg/endpoints/discovery/version.go | 83 + vendor/k8s.io/apiserver/pkg/endpoints/doc.go | 18 + .../endpoints/filterlatency/filterlatency.go | 96 + .../apiserver/pkg/endpoints/filters/OWNERS | 6 + .../apiserver/pkg/endpoints/filters/audit.go | 256 + .../endpoints/filters/audit_annotations.go | 39 + .../pkg/endpoints/filters/authentication.go | 94 + .../pkg/endpoints/filters/authn_audit.go | 86 + .../pkg/endpoints/filters/authorization.go | 106 + .../pkg/endpoints/filters/cachecontrol.go | 33 + .../apiserver/pkg/endpoints/filters/doc.go | 21 + .../pkg/endpoints/filters/impersonation.go | 239 + .../pkg/endpoints/filters/metrics.go | 115 + .../filters/request_received_time.go | 40 + .../pkg/endpoints/filters/requestinfo.go | 41 + .../pkg/endpoints/filters/storageversion.go | 121 + .../pkg/endpoints/filters/warning.go | 133 + .../apiserver/pkg/endpoints/groupversion.go | 137 + .../pkg/endpoints/handlers/create.go | 247 + .../pkg/endpoints/handlers/delete.go | 290 + .../apiserver/pkg/endpoints/handlers/doc.go | 18 + .../endpoints/handlers/fieldmanager/OWNERS | 5 + .../handlers/fieldmanager/buildmanagerinfo.go | 72 + .../handlers/fieldmanager/capmanagers.go | 134 + .../handlers/fieldmanager/endpoints.yaml | 7018 ++++++ .../handlers/fieldmanager/fieldmanager.go | 243 + .../fieldmanager/internal/atmostevery.go | 60 + .../fieldmanager/internal/conflict.go | 85 + .../handlers/fieldmanager/internal/fields.go | 47 + .../fieldmanager/internal/gvkparser.go | 127 + .../fieldmanager/internal/managedfields.go | 244 + .../fieldmanager/internal/pathelement.go | 140 + .../fieldmanager/lastappliedmanager.go | 172 + .../fieldmanager/lastappliedupdater.go | 117 + .../fieldmanager/managedfieldsupdater.go | 86 + 
.../endpoints/handlers/fieldmanager/node.yaml | 259 + .../endpoints/handlers/fieldmanager/pod.yaml | 121 + .../handlers/fieldmanager/skipnonapplied.go | 91 + .../handlers/fieldmanager/stripmeta.go | 90 + .../handlers/fieldmanager/structuredmerge.go | 171 + .../handlers/fieldmanager/typeconverter.go | 130 + .../handlers/fieldmanager/versionconverter.go | 101 + .../apiserver/pkg/endpoints/handlers/get.go | 287 + .../pkg/endpoints/handlers/helpers.go | 60 + .../apiserver/pkg/endpoints/handlers/namer.go | 155 + .../pkg/endpoints/handlers/negotiation/doc.go | 18 + .../endpoints/handlers/negotiation/errors.go | 99 + .../handlers/negotiation/negotiate.go | 263 + .../apiserver/pkg/endpoints/handlers/patch.go | 680 + .../pkg/endpoints/handlers/response.go | 275 + .../endpoints/handlers/responsewriters/doc.go | 18 + .../handlers/responsewriters/errors.go | 78 + .../handlers/responsewriters/status.go | 83 + .../handlers/responsewriters/writers.go | 286 + .../apiserver/pkg/endpoints/handlers/rest.go | 529 + .../pkg/endpoints/handlers/update.go | 269 + .../apiserver/pkg/endpoints/handlers/watch.go | 337 + .../apiserver/pkg/endpoints/installer.go | 1246 + .../pkg/endpoints/metrics/metrics.go | 114 +- .../apiserver/pkg/endpoints/openapi/OWNERS | 4 + .../pkg/endpoints/openapi/openapi.go | 191 + .../pkg/endpoints/request/received_time.go | 45 + .../pkg/endpoints/request/requestinfo.go | 2 +- .../pkg/endpoints/warning/warning.go | 39 + .../apiserver/pkg/features/kube_features.go | 53 +- vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS | 13 + .../apiserver/pkg/quota/v1/interfaces.go | 88 + .../apiserver/pkg/quota/v1/resources.go | 293 + .../apiserver/pkg/registry/generic/OWNERS | 28 + .../apiserver/pkg/registry/generic/doc.go | 19 + .../apiserver/pkg/registry/generic/matcher.go | 52 + .../apiserver/pkg/registry/generic/options.go | 54 + .../generic/registry/decorated_watcher.go | 102 + .../pkg/registry/generic/registry/doc.go | 19 + .../pkg/registry/generic/registry/dryrun.go | 121 + .../generic/registry/storage_factory.go | 137 + .../pkg/registry/generic/registry/store.go | 1412 ++ .../pkg/registry/generic/storage_decorator.go | 58 + .../k8s.io/apiserver/pkg/registry/rest/OWNERS | 23 + .../apiserver/pkg/registry/rest/create.go | 192 + .../pkg/registry/rest/create_update.go | 52 + .../apiserver/pkg/registry/rest/delete.go | 183 + .../k8s.io/apiserver/pkg/registry/rest/doc.go | 18 + .../apiserver/pkg/registry/rest/export.go | 34 + .../apiserver/pkg/registry/rest/meta.go | 43 + .../apiserver/pkg/registry/rest/rest.go | 351 + .../apiserver/pkg/registry/rest/table.go | 107 + .../apiserver/pkg/registry/rest/update.go | 279 + vendor/k8s.io/apiserver/pkg/server/config.go | 854 + .../apiserver/pkg/server/config_selfclient.go | 97 + .../pkg/server/deprecated_insecure_serving.go | 94 + vendor/k8s.io/apiserver/pkg/server/doc.go | 18 + .../server/dynamiccertificates/cert_key.go | 74 + .../server/dynamiccertificates/client_ca.go | 81 + .../configmap_cafile_content.go | 274 + .../dynamic_cafile_content.go | 255 + .../dynamic_serving_content.go | 180 + .../dynamic_sni_content.go | 50 + .../dynamiccertificates/named_certificates.go | 93 + .../dynamiccertificates/static_content.go | 114 + .../server/dynamiccertificates/tlsconfig.go | 284 + .../dynamiccertificates/union_content.go | 107 + .../pkg/server/dynamiccertificates/util.go | 68 + .../pkg/server/egressselector/config.go | 35 +- .../server/egressselector/egress_selector.go | 15 +- .../apiserver/pkg/server/filters/OWNERS | 5 + .../pkg/server/filters/content_type.go | 28 + 
.../apiserver/pkg/server/filters/cors.go | 98 + .../apiserver/pkg/server/filters/doc.go | 19 + .../apiserver/pkg/server/filters/goaway.go | 84 + .../pkg/server/filters/longrunning.go | 41 + .../pkg/server/filters/maxinflight.go | 220 + .../server/filters/priority-and-fairness.go | 155 + .../apiserver/pkg/server/filters/timeout.go | 314 + .../apiserver/pkg/server/filters/waitgroup.go | 61 + .../apiserver/pkg/server/filters/wrap.go | 72 + .../apiserver/pkg/server/genericapiserver.go | 632 + vendor/k8s.io/apiserver/pkg/server/handler.go | 190 + vendor/k8s.io/apiserver/pkg/server/healthz.go | 159 + .../apiserver/pkg/server/healthz/healthz.go | 13 +- vendor/k8s.io/apiserver/pkg/server/hooks.go | 244 + vendor/k8s.io/apiserver/pkg/server/mux/OWNERS | 4 + vendor/k8s.io/apiserver/pkg/server/mux/doc.go | 18 + .../apiserver/pkg/server/mux/pathrecorder.go | 278 + .../apiserver/pkg/server/options/OWNERS | 16 + .../apiserver/pkg/server/options/admission.go | 234 + .../pkg/server/options/api_enablement.go | 115 + .../apiserver/pkg/server/options/audit.go | 607 + .../pkg/server/options/authentication.go | 421 + .../authentication_dynamic_request_header.go | 79 + .../pkg/server/options/authorization.go | 220 + .../apiserver/pkg/server/options/coreapi.go | 84 + .../options/deprecated_insecure_serving.go | 169 + .../apiserver/pkg/server/options/doc.go | 21 + .../pkg/server/options/egress_selector.go | 93 + .../server/options/encryptionconfig/OWNERS | 9 + .../server/options/encryptionconfig/config.go | 375 + .../apiserver/pkg/server/options/etcd.go | 329 + .../apiserver/pkg/server/options/feature.go | 72 + .../pkg/server/options/recommended.go | 150 + .../pkg/server/options/server_run_options.go | 215 + .../apiserver/pkg/server/options/serving.go | 356 + .../pkg/server/options/serving_unix.go | 31 + .../pkg/server/options/serving_windows.go | 30 + .../server/options/serving_with_loopback.go | 79 + vendor/k8s.io/apiserver/pkg/server/plugins.go | 32 + .../pkg/server/resourceconfig/doc.go | 18 + .../pkg/server/resourceconfig/helpers.go | 201 + .../k8s.io/apiserver/pkg/server/routes/OWNERS | 4 + .../k8s.io/apiserver/pkg/server/routes/doc.go | 18 + .../apiserver/pkg/server/routes/flags.go | 127 + .../apiserver/pkg/server/routes/index.go | 69 + .../apiserver/pkg/server/routes/metrics.go | 51 + .../apiserver/pkg/server/routes/openapi.go | 53 + .../apiserver/pkg/server/routes/profiling.go | 43 + .../apiserver/pkg/server/routes/version.go | 57 + .../apiserver/pkg/server/secure_serving.go | 289 + vendor/k8s.io/apiserver/pkg/server/signal.go | 69 + .../apiserver/pkg/server/signal_posix.go | 26 + .../apiserver/pkg/server/signal_windows.go | 23 + .../apiserver/pkg/server/storage/doc.go | 18 + .../pkg/server/storage/resource_config.go | 124 + .../storage/resource_encoding_config.go | 84 + .../pkg/server/storage/storage_codec.go | 98 + .../pkg/server/storage/storage_factory.go | 350 + vendor/k8s.io/apiserver/pkg/storage/OWNERS | 26 + .../apiserver/pkg/storage/cacher/cacher.go | 1441 ++ .../pkg/storage/cacher/caching_object.go | 397 + .../apiserver/pkg/storage/cacher/metrics.go | 74 + .../pkg/storage/cacher/time_budget.go | 100 + .../apiserver/pkg/storage/cacher/util.go | 60 + .../pkg/storage/cacher/watch_cache.go | 631 + vendor/k8s.io/apiserver/pkg/storage/doc.go | 18 + vendor/k8s.io/apiserver/pkg/storage/errors.go | 195 + .../apiserver/pkg/storage/errors/doc.go | 18 + .../apiserver/pkg/storage/errors/storage.go | 116 + .../k8s.io/apiserver/pkg/storage/etcd3/OWNERS | 7 + .../pkg/storage/etcd3/api_object_versioner.go | 131 + 
.../apiserver/pkg/storage/etcd3/compact.go | 162 + .../apiserver/pkg/storage/etcd3/errors.go | 71 + .../apiserver/pkg/storage/etcd3/event.go | 71 + .../pkg/storage/etcd3/healthcheck.go | 40 + .../pkg/storage/etcd3/lease_manager.go | 102 + .../apiserver/pkg/storage/etcd3/logger.go | 84 + .../pkg/storage/etcd3/metrics/metrics.go | 115 + .../apiserver/pkg/storage/etcd3/store.go | 939 + .../apiserver/pkg/storage/etcd3/watcher.go | 468 + .../apiserver/pkg/storage/interfaces.go | 275 + .../apiserver/pkg/storage/names/generate.go | 54 + .../pkg/storage/selection_predicate.go | 159 + .../pkg/storage/storagebackend/OWNERS | 8 + .../pkg/storage/storagebackend/config.go | 91 + .../storage/storagebackend/factory/etcd3.go | 291 + .../storage/storagebackend/factory/factory.go | 52 + vendor/k8s.io/apiserver/pkg/storage/util.go | 81 + .../pkg/storage/value/encrypt/aes/aes.go | 152 + .../value/encrypt/envelope/envelope.go | 200 + .../value/encrypt/envelope/grpc_service.go | 181 + .../storage/value/encrypt/envelope/metrics.go | 102 + .../encrypt/envelope/v1beta1/service.pb.go | 502 + .../encrypt/envelope/v1beta1/service.proto | 70 + .../value/encrypt/envelope/v1beta1/v1beta1.go | 23 + .../value/encrypt/identity/identity.go | 50 + .../value/encrypt/secretbox/secretbox.go | 69 + .../apiserver/pkg/storage/value/metrics.go | 141 + .../pkg/storage/value/transformer.go | 209 + .../apiserver/pkg/storageversion/OWNERS | 5 + .../apiserver/pkg/storageversion/manager.go | 277 + .../apiserver/pkg/storageversion/updater.go | 128 + .../apiserver/pkg/util/apihelpers/helpers.go | 100 + .../apiserver/pkg/util/dryrun/dryrun.go | 22 + .../pkg/util/flowcontrol/apf_controller.go | 764 + .../util/flowcontrol/apf_controller_debug.go | 268 + .../pkg/util/flowcontrol/apf_filter.go | 132 + .../pkg/util/flowcontrol/counter/interface.go | 33 + .../pkg/util/flowcontrol/counter/noop.go | 25 + .../pkg/util/flowcontrol/debug/dump.go | 48 + .../flowcontrol/fairqueuing/integrator.go | 180 + .../util/flowcontrol/fairqueuing/interface.go | 135 + .../fairqueuing/promise/interface.go | 129 + .../promise/lockingpromise/lockingpromise.go | 124 + .../flowcontrol/fairqueuing/queueset/doc.go | 120 + .../fairqueuing/queueset/queueset.go | 781 + .../flowcontrol/fairqueuing/queueset/types.go | 128 + .../pkg/util/flowcontrol/format/formatting.go | 231 + .../pkg/util/flowcontrol/formatting.go | 40 + .../pkg/util/flowcontrol/metrics/metrics.go | 261 + .../metrics/sample_and_watermark.go | 211 + .../flowcontrol/metrics/timed_observer.go | 52 + .../apiserver/pkg/util/flowcontrol/rule.go | 203 + .../apiserver/pkg/util/flushwriter/doc.go | 19 + .../apiserver/pkg/util/flushwriter/writer.go | 53 + .../apiserver/pkg/util/openapi/proto.go | 54 + .../util/shufflesharding/shufflesharding.go | 107 + .../pkg/util/webhook/authentication.go | 2 +- .../apiserver/pkg/util/webhook/webhook.go | 72 +- .../k8s.io/apiserver/pkg/warning/context.go | 59 + .../plugin/pkg/audit/buffered/buffered.go | 290 + .../plugin/pkg/audit/buffered/doc.go | 19 + .../apiserver/plugin/pkg/audit/log/backend.go | 104 + .../plugin/pkg/audit/truncate/doc.go | 19 + .../plugin/pkg/audit/truncate/truncate.go | 160 + .../plugin/pkg/audit/webhook/webhook.go | 139 + .../authenticator/token/webhook/webhook.go | 31 +- .../plugin/pkg/authorizer/webhook/webhook.go | 28 +- .../client-go/discovery/discovery_client.go | 2 +- .../interface.go | 4 +- .../v1alpha1/interface.go | 10 +- .../v1alpha1/storageversion.go | 89 + vendor/k8s.io/client-go/informers/factory.go | 12 +- .../informers/flowcontrol/interface.go | 
8 + .../v1beta1/flowschema.go} | 45 +- .../flowcontrol/v1beta1/interface.go | 52 + .../v1beta1/prioritylevelconfiguration.go | 89 + vendor/k8s.io/client-go/informers/generic.go | 22 +- .../client-go/informers/node/interface.go | 8 + .../client-go/informers/node/v1/interface.go | 45 + .../informers/node/v1/runtimeclass.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 56 +- .../kubernetes/fake/clientset_generated.go | 28 +- .../client-go/kubernetes/fake/register.go | 8 +- .../client-go/kubernetes/scheme/register.go | 8 +- .../v1alpha1/apiserverinternal_client.go} | 32 +- .../v1alpha1/doc.go | 0 .../v1alpha1/fake/doc.go | 0 .../fake/fake_apiserverinternal_client.go} | 10 +- .../v1alpha1/fake/fake_storageversion.go | 133 + .../v1alpha1/generated_expansion.go | 2 +- .../v1alpha1/storageversion.go | 184 + .../typed/flowcontrol/v1beta1/doc.go | 20 + .../typed/flowcontrol/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_flowcontrol_client.go | 44 + .../v1beta1/fake/fake_flowschema.go | 133 + .../fake/fake_prioritylevelconfiguration.go | 133 + .../flowcontrol/v1beta1/flowcontrol_client.go | 94 + .../typed/flowcontrol/v1beta1/flowschema.go | 184 + .../v1beta1/generated_expansion.go | 23 + .../v1beta1/prioritylevelconfiguration.go | 184 + .../client-go/kubernetes/typed/node/v1/doc.go | 20 + .../kubernetes/typed/node/v1/fake/doc.go | 20 + .../typed/node/v1/fake/fake_node_client.go | 40 + .../typed/node/v1/fake/fake_runtimeclass.go | 122 + .../typed/node/v1/generated_expansion.go | 21 + .../kubernetes/typed/node/v1/node_client.go | 89 + .../kubernetes/typed/node/v1/runtimeclass.go | 168 + .../settings/v1alpha1/fake/fake_podpreset.go | 130 - .../typed/settings/v1alpha1/podpreset.go | 178 - .../v1alpha1/expansion_generated.go | 10 +- .../v1alpha1/storageversion.go | 68 + .../v1beta1/expansion_generated.go | 27 + .../listers/flowcontrol/v1beta1/flowschema.go | 68 + .../v1beta1/prioritylevelconfiguration.go | 68 + .../listers/node/v1/expansion_generated.go | 23 + .../client-go/listers/node/v1/runtimeclass.go | 68 + .../listers/settings/v1alpha1/podpreset.go | 99 - .../pkg/apis/clientauthentication/types.go | 69 +- .../v1alpha1/conversion.go | 27 + .../clientauthentication/v1alpha1/types.go | 6 +- .../v1alpha1/zz_generated.conversion.go | 16 +- .../v1beta1/conversion.go | 8 +- .../clientauthentication/v1beta1/types.go | 74 +- .../v1beta1/zz_generated.conversion.go | 62 + .../v1beta1/zz_generated.deepcopy.go | 29 +- .../zz_generated.deepcopy.go | 29 + .../plugin/pkg/client/auth/exec/exec.go | 58 +- vendor/k8s.io/client-go/rest/config.go | 30 +- vendor/k8s.io/client-go/rest/exec.go | 85 + vendor/k8s.io/client-go/rest/request.go | 22 +- vendor/k8s.io/client-go/rest/transport.go | 11 +- vendor/k8s.io/client-go/rest/warnings.go | 7 +- .../k8s.io/client-go/tools/auth/clientauth.go | 4 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 1 - .../client-go/tools/cache/controller.go | 4 + .../client-go/tools/cache/delta_fifo.go | 34 +- .../k8s.io/client-go/tools/cache/reflector.go | 27 +- .../client-go/tools/cache/shared_informer.go | 4 +- .../client-go/tools/clientcmd/api/types.go | 42 +- .../client-go/tools/clientcmd/api/v1/types.go | 13 +- .../api/v1/zz_generated.conversion.go | 23 +- .../clientcmd/api/zz_generated.deepcopy.go | 3 + .../tools/clientcmd/client_config.go | 12 +- .../client-go/tools/clientcmd/config.go | 5 +- .../client-go/tools/clientcmd/loader.go | 4 + vendor/k8s.io/client-go/tools/events/OWNERS | 8 + vendor/k8s.io/client-go/tools/events/doc.go | 19 + .../tools/events/event_broadcaster.go | 384 
+ .../client-go/tools/events/event_recorder.go | 88 + vendor/k8s.io/client-go/tools/events/fake.go | 45 + .../client-go/tools/events/interfaces.go | 90 + vendor/k8s.io/client-go/tools/record/OWNERS | 28 + vendor/k8s.io/client-go/tools/record/doc.go | 19 + vendor/k8s.io/client-go/tools/record/event.go | 380 + .../client-go/tools/record/events_cache.go | 511 + vendor/k8s.io/client-go/tools/record/fake.go | 66 + .../client-go/tools/record/util/util.go | 44 + vendor/k8s.io/client-go/transport/cache.go | 49 +- vendor/k8s.io/client-go/transport/config.go | 8 +- .../client-go/transport/round_trippers.go | 8 +- .../cli/flag/ciphersuites_flag.go | 162 + .../cli/flag/ciphersuites_flag_114.go | 29 + .../colon_separated_multimap_string_string.go | 102 + .../cli/flag/configuration_map.go | 53 + .../k8s.io/component-base/cli/flag/flags.go | 61 + .../langle_separated_map_string_string.go | 82 + .../cli/flag/map_string_bool.go | 90 + .../cli/flag/map_string_string.go | 112 + .../cli/flag/namedcertkey_flag.go | 113 + vendor/k8s.io/component-base/cli/flag/noop.go | 41 + .../component-base/cli/flag/omitempty.go | 24 + .../component-base/cli/flag/sectioned.go | 79 + .../component-base/cli/flag/string_flag.go | 56 + .../component-base/cli/flag/tristate.go | 83 + vendor/k8s.io/component-base/logs/OWNERS | 8 + .../component-base/logs/datapol/datapol.go | 99 + .../logs/datapol/externaltypes.go | 49 + .../k8s.io/component-base/logs/json/json.go | 177 + vendor/k8s.io/component-base/logs/logs.go | 78 + vendor/k8s.io/component-base/logs/options.go | 133 + vendor/k8s.io/component-base/logs/registry.go | 106 + .../logs/sanitization/sanitization.go | 69 + .../k8s.io/component-base/metrics/metric.go | 3 - .../metrics/processstarttime.go | 8 +- .../metrics/prometheus/workqueue/metrics.go | 130 + .../metrics/testutil/metrics.go | 368 + .../metrics/testutil/promlint.go | 156 + .../metrics/testutil/testutil.go | 86 + vendor/k8s.io/klog/v2/README.md | 6 +- vendor/k8s.io/klog/v2/SECURITY.md | 22 + vendor/k8s.io/klog/v2/klog.go | 168 +- vendor/k8s.io/kube-openapi/pkg/builder/doc.go | 20 + .../kube-openapi/pkg/builder/openapi.go | 445 + .../k8s.io/kube-openapi/pkg/builder/util.go | 61 + .../k8s.io/kube-openapi/pkg/common/common.go | 208 + vendor/k8s.io/kube-openapi/pkg/common/doc.go | 19 + .../pkg/handler/default_pruning.go | 208 + .../kube-openapi/pkg/handler/handler.go | 267 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 452 + .../kube-openapi/pkg/util/proto/document.go | 62 +- .../kube-openapi/pkg/util/proto/openapi.go | 7 + vendor/k8s.io/kube-openapi/pkg/util/trie.go | 79 + vendor/k8s.io/kube-openapi/pkg/util/util.go | 105 + vendor/k8s.io/utils/net/ipnet.go | 221 + vendor/k8s.io/utils/net/net.go | 213 + vendor/k8s.io/utils/net/port.go | 137 + vendor/modules.txt | 273 +- .../konnectivity-client/pkg/client/client.go | 22 +- .../konnectivity-client/pkg/client/conn.go | 6 +- .../structured-merge-diff/v4/fieldpath/doc.go | 21 + .../v4/fieldpath/element.go | 317 + .../v4/fieldpath/fromvalue.go | 134 + .../v4/fieldpath/managers.go | 144 + .../v4/fieldpath/path.go | 118 + .../v4/fieldpath/pathelementmap.go | 85 + .../v4/fieldpath/serialize-pe.go | 168 + .../v4/fieldpath/serialize.go | 238 + .../structured-merge-diff/v4/fieldpath/set.go | 406 + .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 329 + .../structured-merge-diff/v4/schema/doc.go | 28 + .../v4/schema/elements.go | 261 + .../structured-merge-diff/v4/schema/equals.go | 199 + .../v4/schema/schemaschema.go | 161 + 
.../structured-merge-diff/v4/typed/doc.go | 18 + .../structured-merge-diff/v4/typed/helpers.go | 256 + .../structured-merge-diff/v4/typed/merge.go | 353 + .../structured-merge-diff/v4/typed/parser.go | 151 + .../v4/typed/reconcile_schema.go | 295 + .../structured-merge-diff/v4/typed/remove.go | 112 + .../v4/typed/tofieldset.go | 166 + .../structured-merge-diff/v4/typed/typed.go | 315 + .../structured-merge-diff/v4/typed/union.go | 276 + .../v4/typed/validate.go | 195 + 1263 files changed, 193882 insertions(+), 15528 deletions(-) create mode 100644 vendor/github.com/NYTimes/gziphandler/.gitignore create mode 100644 vendor/github.com/NYTimes/gziphandler/.travis.yml create mode 100644 vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md create mode 100644 vendor/github.com/NYTimes/gziphandler/LICENSE.md create mode 100644 vendor/github.com/NYTimes/gziphandler/README.md create mode 100644 vendor/github.com/NYTimes/gziphandler/gzip.go create mode 100644 vendor/github.com/NYTimes/gziphandler/gzip_go18.go create mode 100644 vendor/github.com/blang/semver/.travis.yml create mode 100644 vendor/github.com/coreos/go-semver/LICENSE create mode 100644 vendor/github.com/coreos/go-semver/NOTICE create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/NOTICE create mode 100644 vendor/github.com/coreos/go-systemd/daemon/sdnotify.go create mode 100644 vendor/github.com/coreos/go-systemd/daemon/watchdog.go create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/golang/groupcache/LICENSE 
create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go create mode 100644 vendor/github.com/munnerz/goautoneg/LICENSE create mode 100644 vendor/github.com/munnerz/goautoneg/Makefile create mode 100644 vendor/github.com/munnerz/goautoneg/README.txt create mode 100644 vendor/github.com/munnerz/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md rename vendor/github.com/prometheus/procfs/{cpuinfo_arm.go => cpuinfo_armx.go} (97%) delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go rename vendor/github.com/prometheus/procfs/{cpuinfo_mips64le.go => cpuinfo_mipsx.go} (94%) rename vendor/github.com/prometheus/procfs/{cpuinfo_arm64.go => cpuinfo_others.go} (82%) delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go rename vendor/github.com/prometheus/procfs/{cpuinfo_ppc64.go => cpuinfo_ppcx.go} (96%) rename vendor/github.com/prometheus/procfs/{cpuinfo_default.go => cpuinfo_x86.go} (100%) create mode 100644 vendor/go.etcd.io/etcd/LICENSE create mode 100644 vendor/go.etcd.io/etcd/NOTICE create mode 100644 vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go create mode 100644 vendor/go.etcd.io/etcd/auth/authpb/auth.proto create mode 100644 vendor/go.etcd.io/etcd/clientv3/README.md create mode 100644 vendor/go.etcd.io/etcd/clientv3/auth.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go create mode 100644 
vendor/go.etcd.io/etcd/clientv3/balancer/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/client.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/cluster.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compact_op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compare.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/config.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/ctx.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/kv.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/lease.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/logger.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/maintenance.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/options.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/sort.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/txn.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/watch.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/purge.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go create mode 100644 
vendor/go.etcd.io/etcd/pkg/fileutil/sync.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go rename vendor/{github.com/prometheus/procfs/cpuinfo_mips64.go => go.etcd.io/etcd/pkg/logutil/doc.go} (75%) create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/log_level.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go rename vendor/{github.com/prometheus/procfs/cpuinfo_mips.go => go.etcd.io/etcd/pkg/systemd/doc.go} (75%) create mode 100644 vendor/go.etcd.io/etcd/pkg/systemd/journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/tls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/transport.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/id.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/set.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/slice.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urlsmap.go create mode 100644 vendor/go.etcd.io/etcd/raft/OWNERS create mode 100644 vendor/go.etcd.io/etcd/raft/README.md create mode 100644 vendor/go.etcd.io/etcd/raft/bootstrap.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/confchange.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/restore.go create mode 100644 vendor/go.etcd.io/etcd/raft/design.md create mode 100644 vendor/go.etcd.io/etcd/raft/doc.go create mode 100644 vendor/go.etcd.io/etcd/raft/log.go create mode 100644 vendor/go.etcd.io/etcd/raft/log_unstable.go create mode 100644 vendor/go.etcd.io/etcd/raft/logger.go create mode 100644 vendor/go.etcd.io/etcd/raft/node.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/joint.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/majority.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/quorum.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go create mode 100644 vendor/go.etcd.io/etcd/raft/raft.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/confchange.go create mode 100644 
vendor/go.etcd.io/etcd/raft/raftpb/confstate.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.proto create mode 100644 vendor/go.etcd.io/etcd/raft/rawnode.go create mode 100644 vendor/go.etcd.io/etcd/raft/read_only.go create mode 100644 vendor/go.etcd.io/etcd/raft/status.go create mode 100644 vendor/go.etcd.io/etcd/raft/storage.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/inflights.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/progress.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/state.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/tracker.go create mode 100644 vendor/go.etcd.io/etcd/raft/util.go create mode 100644 vendor/go.etcd.io/etcd/version/version.go create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/.travis.yml create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/error.go create mode 100644 vendor/go.uber.org/atomic/glide.lock create mode 100644 vendor/go.uber.org/atomic/glide.yaml create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/.travis.yml create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/glide.lock create mode 100644 vendor/go.uber.org/multierr/glide.yaml create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/.travis.yml create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/FAQ.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/check_license.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/global_go112.go create mode 100644 vendor/go.uber.org/zap/global_prego112.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 
vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 vendor/go.uber.org/zap/sink.go create mode 100644 vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go create mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go rename vendor/golang.org/x/net/idna/{tables12.00.go => tables12.0.0.go} (99%) create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 
100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s rename vendor/{google.golang.org/protobuf/internal/fieldnum/empty_gen.go => golang.org/x/sys/cpu/cpu_gc_arm64.go} (57%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go rename vendor/golang.org/x/sys/{unix/syscall_darwin_arm.1_11.go => cpu/cpu_gccgo_arm64.go} (54%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go rename vendor/golang.org/x/sys/{unix/syscall_darwin_386.1_11.go => cpu/cpu_linux_noinit.go} (53%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go rename vendor/golang.org/x/sys/{unix/syscall_darwin_arm64.1_11.go => cpu/cpu_other_arm64.go} (53%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go rename vendor/golang.org/x/sys/unix/{zsyscall_darwin_arm64.1_11.go => zsyscall_openbsd_mips64.go} (86%) create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go 
create mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go create mode 100644 vendor/k8s.io/api/admission/v1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1/register.go create mode 100644 vendor/k8s.io/api/admission/v1/types.go create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go create mode 100644 
vendor/k8s.io/api/admission/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.proto rename vendor/k8s.io/api/{settings/v1alpha1 => admission/v1beta1}/register.go (90%) create mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/api/{settings/v1alpha1 => node/v1}/doc.go (82%) create mode 100644 vendor/k8s.io/api/node/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1/register.go create mode 100644 vendor/k8s.io/api/node/v1/types.go create mode 100644 vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go rename vendor/k8s.io/api/{settings/v1alpha1 => node/v1}/zz_generated.deepcopy.go (54%) delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/attributes.go create mode 100644 
vendor/k8s.io/apiserver/pkg/admission/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/chain.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/decorator.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/handler.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go create mode 100644 
vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugins.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/reinvocation.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/checker.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/reader.go create mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/path.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go create mode 100644 
vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go create mode 100644 
vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/installer.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/quota/v1/resources.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/options.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/create.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/delete.go create 
mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/export.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/meta.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/rest.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/table.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/update.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/config_selfclient.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/content_type.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/cors.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/goaway.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/timeout.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/wrap.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/genericapiserver.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/handler.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/hooks.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/admission.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/audit.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go create mode 100644 
vendor/k8s.io/apiserver/pkg/server/options/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/coreapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/etcd.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/feature.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/recommended.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/plugins.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/flags.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/index.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/openapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/profiling.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/version.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/secure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_posix.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_windows.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/storage.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS create mode 100644 
vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/generate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/transformer.go create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/manager.go create mode 100644 vendor/k8s.io/apiserver/pkg/storageversion/updater.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go create mode 100644 
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/openapi/proto.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go create mode 100644 vendor/k8s.io/apiserver/pkg/warning/context.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go rename vendor/k8s.io/client-go/informers/{settings => apiserverinternal}/interface.go (94%) rename vendor/k8s.io/client-go/informers/{settings => apiserverinternal}/v1alpha1/interface.go (80%) create mode 100644 vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go rename vendor/k8s.io/client-go/informers/{settings/v1alpha1/podpreset.go => flowcontrol/v1beta1/flowschema.go} (51%) create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/node/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go rename vendor/k8s.io/client-go/kubernetes/typed/{settings/v1alpha1/settings_client.go => apiserverinternal/v1alpha1/apiserverinternal_client.go} (64%) rename vendor/k8s.io/client-go/kubernetes/typed/{settings => apiserverinternal}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{settings => apiserverinternal}/v1alpha1/fake/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{settings/v1alpha1/fake/fake_settings_client.go => apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go} (75%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go rename vendor/k8s.io/client-go/kubernetes/typed/{settings => apiserverinternal}/v1alpha1/generated_expansion.go (93%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go rename vendor/k8s.io/client-go/listers/{settings => apiserverinternal}/v1alpha1/expansion_generated.go (69%) create mode 100644 vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go create mode 100644 vendor/k8s.io/client-go/rest/exec.go create mode 100644 vendor/k8s.io/client-go/tools/events/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/events/doc.go create mode 100644 vendor/k8s.io/client-go/tools/events/event_broadcaster.go create mode 100644 vendor/k8s.io/client-go/tools/events/event_recorder.go create mode 100644 vendor/k8s.io/client-go/tools/events/fake.go create mode 100644 vendor/k8s.io/client-go/tools/events/interfaces.go create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/doc.go create mode 100644 vendor/k8s.io/client-go/tools/record/event.go create mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go create mode 100644 vendor/k8s.io/client-go/tools/record/fake.go create mode 100644 vendor/k8s.io/client-go/tools/record/util/util.go create mode 100644 vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go create mode 100644 vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go create mode 100644 vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go create mode 100644 vendor/k8s.io/component-base/cli/flag/configuration_map.go create mode 
100644 vendor/k8s.io/component-base/cli/flag/flags.go create mode 100644 vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go create mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_bool.go create mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_string.go create mode 100644 vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go create mode 100644 vendor/k8s.io/component-base/cli/flag/noop.go create mode 100644 vendor/k8s.io/component-base/cli/flag/omitempty.go create mode 100644 vendor/k8s.io/component-base/cli/flag/sectioned.go create mode 100644 vendor/k8s.io/component-base/cli/flag/string_flag.go create mode 100644 vendor/k8s.io/component-base/cli/flag/tristate.go create mode 100644 vendor/k8s.io/component-base/logs/OWNERS create mode 100644 vendor/k8s.io/component-base/logs/datapol/datapol.go create mode 100644 vendor/k8s.io/component-base/logs/datapol/externaltypes.go create mode 100644 vendor/k8s.io/component-base/logs/json/json.go create mode 100644 vendor/k8s.io/component-base/logs/logs.go create mode 100644 vendor/k8s.io/component-base/logs/options.go create mode 100644 vendor/k8s.io/component-base/logs/registry.go create mode 100644 vendor/k8s.io/component-base/logs/sanitization/sanitization.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/metrics.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/promlint.go create mode 100644 vendor/k8s.io/component-base/metrics/testutil/testutil.go create mode 100644 vendor/k8s.io/klog/v2/SECURITY.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/util.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/common.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/handler.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/trie.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/util.go create mode 100644 vendor/k8s.io/utils/net/ipnet.go create mode 100644 vendor/k8s.io/utils/net/net.go create mode 100644 vendor/k8s.io/utils/net/port.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go 
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go diff --git a/vendor/github.com/NYTimes/gziphandler/.gitignore b/vendor/github.com/NYTimes/gziphandler/.gitignore new file mode 100644 index 000000000..1377554eb --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/NYTimes/gziphandler/.travis.yml b/vendor/github.com/NYTimes/gziphandler/.travis.yml new file mode 100644 index 000000000..d2b67f69c --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.7 + - 1.8 + - tip diff --git a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..cdbca194c --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +--- +layout: code-of-conduct +version: v1.0 +--- + +This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community. + +Our open source community strives to: + +* **Be friendly and patient.** +* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. +* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language. +* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one. +* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. 
Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. +* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes. + +## Definitions + +Harassment includes, but is not limited to: + +- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation +- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment +- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle +- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop +- Threats of violence, both physical and psychological +- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm +- Deliberate intimidation +- Stalking or following +- Harassing photography or recording, including logging online activity for harassment purposes +- Sustained disruption of discussion +- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour +- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others +- Continued one-on-one communication after requests to cease +- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse +- Publication of non-harassing private communication + +Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding: + +- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’ +- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you” +- Refusal to explain or debate social justice concepts +- Communicating in a ‘tone’ you don’t find congenial +- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions + + +### Diversity Statement + +We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong. 
+ +Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected +characteristics above, including participants with disabilities. + +### Reporting Issues + +If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include: + +- Your contact information. +- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please +include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link. +- Any additional information that may be helpful. + +After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse. + +### Attribution & Acknowledgements + +We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration: + +* [Django](https://www.djangoproject.com/conduct/reporting/) +* [Python](https://www.python.org/community/diversity/) +* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct) +* [Contributor Covenant](http://contributor-covenant.org/) +* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/) +* [Citizen Code of Conduct](http://citizencodeofconduct.org/) + +This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct diff --git a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md new file mode 100644 index 000000000..b89a9eb4f --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing to NYTimes/gziphandler + +This is an open source project started by handful of developers at The New York Times and open to the entire Go community. + +We really appreciate your help! + +## Filing issues + +When filing an issue, make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +## Contributing code + +Before submitting changes, please follow these guidelines: + +1. Check the open issues and pull requests for existing discussions. +2. Open an issue to discuss a new feature. +3. Write tests. +4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments). +5. Make sure your changes pass `go test`. +6. Make sure the entire test suite passes locally and on Travis CI. +7. Open a Pull Request. +8. 
[Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + +Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file. diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md new file mode 100644 index 000000000..b7e2ecb63 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/LICENSE.md @@ -0,0 +1,13 @@ +Copyright (c) 2015 The New York Times Company + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this library except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md new file mode 100644 index 000000000..6d7246070 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -0,0 +1,52 @@ +Gzip Handler +============ + +This is a tiny Go package which wraps HTTP handlers to transparently gzip the +response body, for clients which support it. Although it's usually simpler to +leave that to a reverse proxy (like nginx or Varnish), this package is useful +when that's undesirable. + + +## Usage + +Call `GzipHandler` with any handler (an object which implements the +`http.Handler` interface), and it'll return a new handler which gzips the +response. For example: + +```go +package main + +import ( + "io" + "net/http" + "github.com/NYTimes/gziphandler" +) + +func main() { + withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "Hello, World") + }) + + withGz := gziphandler.GzipHandler(withoutGz) + + http.Handle("/", withGz) + http.ListenAndServe("0.0.0.0:8000", nil) +} +``` + + +## Documentation + +The docs can be found at [godoc.org][docs], as usual. + + +## License + +[Apache 2.0][license]. + + + + +[docs]: https://godoc.org/github.com/nytimes/gziphandler +[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go new file mode 100644 index 000000000..ea6dba1e7 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -0,0 +1,332 @@ +package gziphandler + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" +) + +const ( + vary = "Vary" + acceptEncoding = "Accept-Encoding" + contentEncoding = "Content-Encoding" + contentType = "Content-Type" + contentLength = "Content-Length" +) + +type codings map[string]float64 + +const ( + // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set. + // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. + // The examples seem to indicate that it is. + DefaultQValue = 1.0 + + // DefaultMinSize defines the minimum size to reach to enable compression. + // It's 512 bytes. 
+ DefaultMinSize = 512 +) + +// gzipWriterPools stores a sync.Pool for each compression level for reuse of +// gzip.Writers. Use poolIndex to covert a compression level to an index into +// gzipWriterPools. +var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool + +func init() { + for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { + addLevelPool(i) + } + addLevelPool(gzip.DefaultCompression) +} + +// poolIndex maps a compression level to its index into gzipWriterPools. It +// assumes that level is a valid gzip compression level. +func poolIndex(level int) int { + // gzip.DefaultCompression == -1, so we need to treat it special. + if level == gzip.DefaultCompression { + return gzip.BestCompression - gzip.BestSpeed + 1 + } + return level - gzip.BestSpeed +} + +func addLevelPool(level int) { + gzipWriterPools[poolIndex(level)] = &sync.Pool{ + New: func() interface{} { + // NewWriterLevel only returns error on a bad level, we are guaranteeing + // that this will be a valid level so it is okay to ignore the returned + // error. + w, _ := gzip.NewWriterLevel(nil, level) + return w + }, + } +} + +// GzipResponseWriter provides an http.ResponseWriter interface, which gzips +// bytes before writing them to the underlying response. This doesn't close the +// writers, so don't forget to do that. +// It can be configured to skip response smaller than minSize. +type GzipResponseWriter struct { + http.ResponseWriter + index int // Index for gzipWriterPools. + gw *gzip.Writer + + code int // Saves the WriteHeader value. + + minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. + buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. +} + +// Write appends data to the gzip writer. +func (w *GzipResponseWriter) Write(b []byte) (int, error) { + // If content type is not set. + if _, ok := w.Header()[contentType]; !ok { + // It infer it from the uncompressed body. + w.Header().Set(contentType, http.DetectContentType(b)) + } + + // GZIP responseWriter is initialized. Use the GZIP responseWriter. + if w.gw != nil { + n, err := w.gw.Write(b) + return n, err + } + + // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. + // On the first write, w.buf changes from nil to a valid slice + w.buf = append(w.buf, b...) + + // If the global writes are bigger than the minSize, compression is enable. + if len(w.buf) >= w.minSize { + err := w.startGzip() + if err != nil { + return 0, err + } + } + + return len(b), nil +} + +// startGzip initialize any GZIP specific informations. +func (w *GzipResponseWriter) startGzip() error { + + // Set the GZIP header. + w.Header().Set(contentEncoding, "gzip") + + // if the Content-Length is already set, then calls to Write on gzip + // will fail to set the Content-Length header since its already set + // See: https://github.com/golang/go/issues/14975. + w.Header().Del(contentLength) + + // Write the header to gzip response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + } + + // Initialize the GZIP response. + w.init() + + // Flush the buffer into the gzip reponse. + n, err := w.gw.Write(w.buf) + + // This should never happen (per io.Writer docs), but if the write didn't + // accept the entire buffer but returned no specific error, we have no clue + // what's going on, so abort just to be safe. 
+ if err == nil && n < len(w.buf) { + return io.ErrShortWrite + } + + w.buf = nil + return err +} + +// WriteHeader just saves the response code until close or GZIP effective writes. +func (w *GzipResponseWriter) WriteHeader(code int) { + w.code = code +} + +// init graps a new gzip writer from the gzipWriterPool and writes the correct +// content encoding header. +func (w *GzipResponseWriter) init() { + // Bytes written during ServeHTTP are redirected to this gzip writer + // before being written to the underlying response. + gzw := gzipWriterPools[w.index].Get().(*gzip.Writer) + gzw.Reset(w.ResponseWriter) + w.gw = gzw +} + +// Close will close the gzip.Writer and will put it back in the gzipWriterPool. +func (w *GzipResponseWriter) Close() error { + if w.gw == nil { + // Gzip not trigged yet, write out regular response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + } + if w.buf != nil { + _, writeErr := w.ResponseWriter.Write(w.buf) + // Returns the error if any at write. + if writeErr != nil { + return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error()) + } + } + return nil + } + + err := w.gw.Close() + gzipWriterPools[w.index].Put(w.gw) + w.gw = nil + return err +} + +// Flush flushes the underlying *gzip.Writer and then the underlying +// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter +// an http.Flusher. +func (w *GzipResponseWriter) Flush() { + if w.gw != nil { + w.gw.Flush() + } + + if fw, ok := w.ResponseWriter.(http.Flusher); ok { + fw.Flush() + } +} + +// Hijack implements http.Hijacker. If the underlying ResponseWriter is a +// Hijacker, its Hijack method is returned. Otherwise an error is returned. +func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := w.ResponseWriter.(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") +} + +// verify Hijacker interface implementation +var _ http.Hijacker = &GzipResponseWriter{} + +// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in +// an error case it panics rather than returning an error. +func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { + wrap, err := NewGzipLevelHandler(level) + if err != nil { + panic(err) + } + return wrap +} + +// NewGzipLevelHandler returns a wrapper function (often known as middleware) +// which can be used to wrap an HTTP handler to transparently gzip the response +// body if the client supports it (via the Accept-Encoding header). Responses will +// be encoded at the given gzip compression level. An error will be returned only +// if an invalid gzip compression level is given, so if one can ensure the level +// is valid, the returned error can be safely ignored. +func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { + return NewGzipLevelAndMinSize(level, DefaultMinSize) +} + +// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller +// specify the minimum size before compression. 
+func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { + if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) { + return nil, fmt.Errorf("invalid compression level requested: %d", level) + } + if minSize < 0 { + return nil, fmt.Errorf("minimum size must be more than zero") + } + return func(h http.Handler) http.Handler { + index := poolIndex(level) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(vary, acceptEncoding) + + if acceptsGzip(r) { + gw := &GzipResponseWriter{ + ResponseWriter: w, + index: index, + minSize: minSize, + } + defer gw.Close() + + h.ServeHTTP(gw, r) + } else { + h.ServeHTTP(w, r) + } + }) + }, nil +} + +// GzipHandler wraps an HTTP handler, to transparently gzip the response body if +// the client supports it (via the Accept-Encoding header). This will compress at +// the default compression level. +func GzipHandler(h http.Handler) http.Handler { + wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) + return wrapper(h) +} + +// acceptsGzip returns true if the given HTTP request indicates that it will +// accept a gzipped response. +func acceptsGzip(r *http.Request) bool { + acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) + return acceptedEncodings["gzip"] > 0.0 +} + +// parseEncodings attempts to parse a list of codings, per RFC 2616, as might +// appear in an Accept-Encoding header. It returns a map of content-codings to +// quality values, and an error containing the errors encountered. It's probably +// safe to ignore those, because silently ignoring errors is how the internet +// works. +// +// See: http://tools.ietf.org/html/rfc2616#section-14.3. +func parseEncodings(s string) (codings, error) { + c := make(codings) + var e []string + + for _, ss := range strings.Split(s, ",") { + coding, qvalue, err := parseCoding(ss) + + if err != nil { + e = append(e, err.Error()) + } else { + c[coding] = qvalue + } + } + + // TODO (adammck): Use a proper multi-error struct, so the individual errors + // can be extracted if anyone cares. + if len(e) > 0 { + return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) + } + + return c, nil +} + +// parseCoding parses a single conding (content-coding with an optional qvalue), +// as might appear in an Accept-Encoding header. It attempts to forgive minor +// formatting errors. +func parseCoding(s string) (coding string, qvalue float64, err error) { + for n, part := range strings.Split(s, ";") { + part = strings.TrimSpace(part) + qvalue = DefaultQValue + + if n == 0 { + coding = strings.ToLower(part) + } else if strings.HasPrefix(part, "q=") { + qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) + + if qvalue < 0.0 { + qvalue = 0.0 + } else if qvalue > 1.0 { + qvalue = 1.0 + } + } + } + + if coding == "" { + err = fmt.Errorf("empty content-coding") + } + + return +} diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go new file mode 100644 index 000000000..fa9665b7e --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go @@ -0,0 +1,43 @@ +// +build go1.8 + +package gziphandler + +import "net/http" + +// Push initiates an HTTP/2 server push. +// Push returns ErrNotSupported if the client has disabled push or if push +// is not supported on the underlying connection. 
+func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error { + pusher, ok := w.ResponseWriter.(http.Pusher) + if ok && pusher != nil { + return pusher.Push(target, setAcceptEncodingForPushOptions(opts)) + } + return http.ErrNotSupported +} + +// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers. +func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions { + + if opts == nil { + opts = &http.PushOptions{ + Header: http.Header{ + acceptEncoding: []string{"gzip"}, + }, + } + return opts + } + + if opts.Header == nil { + opts.Header = http.Header{ + acceptEncoding: []string{"gzip"}, + } + return opts + } + + if encoding := opts.Header.Get(acceptEncoding); encoding == "" { + opts.Header.Add(acceptEncoding, "gzip") + return opts + } + + return opts +} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 000000000..102fb9a69 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) +env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md index 4399639e2..08b2e4a3d 100644 --- a/vendor/github.com/blang/semver/README.md +++ b/vendor/github.com/blang/semver/README.md @@ -1,4 +1,4 @@ -semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) ====== semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. @@ -41,6 +41,7 @@ Features - Compare Helper Methods - InPlace manipulation - Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` - Sortable (implements sort.Interface) - database/sql compatible (sql.Scanner/Valuer) - encoding/json compatible (json.Marshaler/Unmarshaler) @@ -59,6 +60,8 @@ A condition is composed of an operator and a version. The supported operators ar - `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` - `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. +Note that spaces between the operator and the version will be gracefully tolerated. 
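As a quick, illustrative aside on the range syntax documented above: a minimal usage sketch, assuming the `MustParse`, `ParseRange`, and `Compare` helpers that this vendored blang/semver package provides.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// MustParse panics on malformed input; Parse returns an error instead.
	v := semver.MustParse("1.4.2")

	// ParseRange turns a range expression (space-separated conditions are AND-ed)
	// into a predicate over Versions.
	inRange, err := semver.ParseRange(">=1.0.0 <2.0.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(inRange(v))                               // true
	fmt.Println(v.Compare(semver.MustParse("2.0.0")) < 0) // true: v sorts before 2.0.0
}
```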
+ A `Range` can link multiple `Ranges` separated by space: Ranges can be linked by logical AND: diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json index 568be8d94..1cf8ebdd9 100644 --- a/vendor/github.com/blang/semver/package.json +++ b/vendor/github.com/blang/semver/package.json @@ -12,6 +12,6 @@ "license": "MIT", "name": "semver", "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.4.0" + "version": "3.5.1" } diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 000000000..76cf4852c --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 000000000..e256b41a5 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
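Before the `sort.go` helper that follows, a minimal usage sketch tying the `Version` API defined above to `Sort`; it assumes only the `NewVersion`, `New`, `LessThan`, and `Sort` functions shown in this package.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// NewVersion reports an error for anything that is not in dotted-tri format.
	a, err := semver.NewVersion("1.20.0")
	if err != nil {
		panic(err)
	}
	// New wraps NewVersion via Must and panics on bad input.
	b := semver.New("1.19.2-rc.1")

	fmt.Println(b.LessThan(*a)) // true: 1.19.2-rc.1 precedes 1.20.0

	// Sort orders a slice of *Version in place via the Versions sort.Interface below.
	vs := []*semver.Version{a, b, semver.New("1.19.2")}
	semver.Sort(vs)
	fmt.Println(vs) // [1.19.2-rc.1 1.19.2 1.20.0]
}
```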
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 000000000..ba4ae31f1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,84 @@ +// Copyright 2014 Docker, Inc. +// Copyright 2015-2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package daemon provides a Go implementation of the sd_notify protocol. +// It can be used to inform systemd of service start-up completion, watchdog +// events, and other status changes. +// +// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description +package daemon + +import ( + "net" + "os" +) + +const ( + // SdNotifyReady tells the service manager that service startup is finished + // or the service finished loading its configuration. + SdNotifyReady = "READY=1" + + // SdNotifyStopping tells the service manager that the service is beginning + // its shutdown. + SdNotifyStopping = "STOPPING=1" + + // SdNotifyReloading tells the service manager that this service is + // reloading its configuration. Note that you must call SdNotifyReady when + // it completed reloading. + SdNotifyReloading = "RELOADING=1" + + // SdNotifyWatchdog tells the service manager to update the watchdog + // timestamp for the service. + SdNotifyWatchdog = "WATCHDOG=1" +) + +// SdNotify sends a message to the init daemon. It is common to ignore the error. 
+// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` +// will be unconditionally unset. +// +// It returns one of the following: +// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) +// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) +// (true, nil) - notification supported, data has been sent +func SdNotify(unsetEnvironment bool, state string) (bool, error) { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + // NOTIFY_SOCKET not set + if socketAddr.Name == "" { + return false, nil + } + + if unsetEnvironment { + if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { + return false, err + } + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + // Error connecting to NOTIFY_SOCKET + if err != nil { + return false, err + } + defer conn.Close() + + if _, err = conn.Write([]byte(state)); err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go new file mode 100644 index 000000000..7a0e0d3a5 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go @@ -0,0 +1,73 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package daemon + +import ( + "fmt" + "os" + "strconv" + "time" +) + +// SdWatchdogEnabled returns watchdog information for a service. +// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every +// time / 2. +// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and +// `WATCHDOG_PID` will be unconditionally unset. +// +// It returns one of the following: +// (0, nil) - watchdog isn't enabled or we aren't the watched PID. +// (0, err) - an error happened (e.g. error converting time). +// (time, nil) - watchdog is enabled and we can send ping. +// time is delay before inactive service will be killed. 
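+//
+// A minimal usage sketch (illustrative, not part of the upstream docs; it
+// assumes the process is started by systemd with WatchdogSec= configured):
+//
+//	interval, err := daemon.SdWatchdogEnabled(false)
+//	if err == nil && interval > 0 {
+//		go func() {
+//			for range time.Tick(interval / 2) {
+//				daemon.SdNotify(false, daemon.SdNotifyWatchdog)
+//			}
+//		}()
+//	}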
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { + wusec := os.Getenv("WATCHDOG_USEC") + wpid := os.Getenv("WATCHDOG_PID") + if unsetEnvironment { + wusecErr := os.Unsetenv("WATCHDOG_USEC") + wpidErr := os.Unsetenv("WATCHDOG_PID") + if wusecErr != nil { + return 0, wusecErr + } + if wpidErr != nil { + return 0, wpidErr + } + } + + if wusec == "" { + return 0, nil + } + s, err := strconv.Atoi(wusec) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err) + } + if s <= 0 { + return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number") + } + interval := time.Duration(s) * time.Microsecond + + if wpid == "" { + return interval, nil + } + p, err := strconv.Atoi(wpid) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err) + } + if os.Getpid() != p { + return 0, nil + } + + return interval, nil +} diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 000000000..a0f4837a0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,225 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. 
Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. 
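+// The file is unlinked immediately, so it disappears once closed; Send uses it
+// to hand oversized journal entries to journald as a file descriptor passed
+// via SCM_RIGHTS ancillary data.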
+func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 000000000..f79dbfca5 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. + +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. 
Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. + +##### Log levels have specific meanings: + + * Critical: Unrecoverable. Must fail. + * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost + * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. + * Notice: Normal, but important (uncommon) log information. + * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. + * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. + * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. + diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 000000000..b305a845f --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) 
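+	// Terminate the record with a newline unless the entries already end in one.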
+ endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) + prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 000000000..426603ef3 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 000000000..38ce6d261 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "io" + "os" + "syscall" +) + +// Here's where the opinionation comes in. We need some sensible defaults, +// especially after taking over the log package. Your project (whatever it may +// be) may see things differently. That's okay; there should be no defaults in +// the main package that cannot be controlled or overridden programatically, +// otherwise it's a bug. Doing so is creating your own init_log.go file much +// like this one. + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewDefaultFormatter(os.Stderr)) + SetGlobalLogLevel(INFO) +} + +func NewDefaultFormatter(out io.Writer) Formatter { + if syscall.Getppid() == 1 { + // We're running under init, which may be systemd. 
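+		// Prefer journald when it is reachable; otherwise fall through to the
+		// pretty formatter on the supplied writer below.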
+ f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 000000000..455305065 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 000000000..72e05207c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 000000000..970086b9f --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 000000000..226b60c22 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. 
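+//
+// Together with Set and String, this lets a *LogLevel be registered directly
+// as a command-line flag value, e.g. (illustrative):
+//
+//	lvl := capnslog.INFO
+//	pflag.Var(&lvl, "loglevel", "log verbosity (CRITICAL through TRACE)")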
+func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. +func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
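+//
+// For example (illustrative; the repository and package names are placeholders):
+//
+//	var plog = capnslog.NewPackageLogger("github.com/example/project", "server")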
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 000000000..00ff37149 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,191 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +// SetLevel allows users to change the current logging level. +func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) 
+ p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 000000000..4be5a1f2d --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
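For orientation, the following is a minimal usage sketch of the capnslog API vendored above (logutil.go and pkg_logger.go) together with the syslog formatter defined just below; it is not part of the vendored sources, and the repository path github.com/example/project and the syslog tag "example" are illustrative placeholders.

package main

import (
	"github.com/coreos/pkg/capnslog"
)

// plog is the package-level logger; capnslog keys loggers by (repo, package).
// Both strings here are hypothetical placeholders.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// Route formatted entries to the local syslog daemon (assumes one is reachable).
	if f, err := capnslog.NewDefaultSyslogFormatter("example"); err == nil {
		capnslog.SetFormatter(f)
	}

	// Raise the level for every package in every registered repository...
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	// ...or configure individual packages from a "pkg=level" string.
	repo := capnslog.MustRepoLogger("github.com/example/project")
	if levels, err := repo.ParseLogLevelConfig("*=NOTICE,main=DEBUG"); err == nil {
		repo.SetLogLevel(levels)
	}

	plog.Infof("debug enabled: %v", plog.LevelAt(capnslog.DEBUG))
}

Two behaviors of the vendored code are worth noting: internalLog always emits CRITICAL entries regardless of the configured level, so Panic* and Fatal* output is never filtered, and if no Formatter has been set all other log calls are silently dropped.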
+// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..0b4659b73 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..081c86fa8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. 
It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+	github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+	package test;
+
+	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	message A {
+		optional string Description = 1 [(gogoproto.nullable) = false];
+		optional int64 Number = 2 [(gogoproto.nullable) = false];
+		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+	}
+
+Will generate a go struct which looks a lot like this:
+
+	type A struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+	message B {
+		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+	}
+
+See below that A is embedded in B.
+
+	type B struct {
+		A
+		G []github_com_gogo_protobuf_test_custom.Uint128
+	}
+
+Also see the repeated custom type.
+
+	type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+	message C {
+		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+	}
+
+See below that the field's name is MySize and not Size.
+
+	type C struct {
+		MySize *int64
+	}
+
+This is useful when having a protocol buffer message with a field name which conflicts with a generated method.
+As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes; these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+  but are actually the imports specified in the .proto file.
+
+Gogoprotobuf also has lost some features which should be brought back with time:
+
+  - Marshalling and unmarshalling with reflect and without the unsafe package,
+  this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix.
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method.
+  - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face.
+  - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method.
+  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
+  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
+
+Less Typing and Peace of Mind are explained in their respective plugin folders' godoc:
+
+	- github.com/gogo/protobuf/plugin/
+
+If you do not use any of these extensions, the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+	github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 000000000..1e91766ae
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,874 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: gogo.proto
+
+package gogoproto
+
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..b80c85653 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
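The extension descriptors registered above (and the full set in gogo.pb.go) are consumed by reading them back out of descriptor options. The following is a small illustrative sketch, not part of the vendored sources, showing that pattern with the gogo/protobuf proto API used throughout this patch; the hand-built FieldDescriptorProto merely stands in for what protoc would emit.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// inspectField reads gogoproto options from a field descriptor, mirroring the
// style of the helper functions vendored later in this patch.
func inspectField(field *descriptor.FieldDescriptorProto) {
	// Boolean extensions fall back to a default when unset.
	fmt.Println("nullable:", proto.GetBoolExtension(field.Options, gogoproto.E_Nullable, true))

	// String extensions come back as a *string from GetExtension.
	if field.Options != nil {
		if v, err := proto.GetExtension(field.Options, gogoproto.E_Customtype); err == nil {
			if s, ok := v.(*string); ok && s != nil {
				fmt.Println("customtype:", *s)
			}
		}
	}
}

func main() {
	// Set an option by hand, the way a .proto annotation would.
	opts := &descriptor.FieldOptions{}
	if err := proto.SetExtension(opts, gogoproto.E_Nullable, proto.Bool(false)); err != nil {
		panic(err)
	}
	inspectField(&descriptor.FieldDescriptorProto{
		Name:    proto.String("Id"),
		Options: opts,
	})
}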
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..390d4e4be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..18b2a3318 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..165b2110d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/golang/groupcache/LICENSE 
b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 000000000..eac1c7664 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. 
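The lru package vendored above exposes a deliberately small, non-concurrent API. A minimal usage sketch (hypothetical keys and values; callers sharing a Cache across goroutines must add their own locking):

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // evict the least recently used entry once a third is added
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key)
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Get("a")    // "a" becomes the most recently used entry
	c.Add("c", 3) // exceeds MaxEntries, so the oldest entry ("b") is evicted
	if v, ok := c.Get("a"); ok {
		fmt.Println("a =", v, "len =", c.Len()) // a = 1 len = 2
	}
}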
+func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go index 4a5931009..47eb3e445 100644 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index c9a63ceda..665618684 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -86,6 +90,52 @@ import ( // If there is a cycle, then the pointed at values are considered equal // only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from y, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. 
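The exported contract is unchanged by this refactor: Equal reports semantic equality and Diff returns an empty string exactly when Equal would return true for the same inputs and options. A small sketch with hypothetical types (cmpopts is the companion options package):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type endpoint struct {
	Name  string
	Ports []int
}

func main() {
	x := endpoint{Name: "router", Ports: []int{80, 443}}
	y := endpoint{Name: "router", Ports: []int{443, 80}}

	fmt.Println(cmp.Equal(x, y)) // false: slice order is significant by default

	// Ignore ordering; Diff is empty if and only if Equal reports true.
	sorted := cmpopts.SortSlices(func(a, b int) bool { return a < b })
	fmt.Println(cmp.Equal(x, y, sorted))      // true
	fmt.Println(cmp.Diff(x, y, sorted) == "") // true
}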
If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -108,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool { t = vx.Type() } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d + return &pathStep{t, vx, vy} } type state struct { @@ -352,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). if !flags.AtLeastGo110 { if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { return reflect.New(t).Elem() @@ -362,6 +386,7 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy var mayForce, mayForceInit bool @@ -383,6 +408,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. 
+ addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } @@ -393,6 +419,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { mayForceInit = true } step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index dd032354f..dfa5d2137 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -10,6 +10,6 @@ import "reflect" const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 57020e26c..351f1a34b 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -17,9 +17,19 @@ const supportExporters = true // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - // See https://github.com/google/go-cmp/issues/167 for discussion of the - // following expression. - return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e42662..730e223ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,6 +119,8 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // @@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. 
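The new addr/paddr plumbing above exists because forcibly exported fields are read through the parent struct's address, and a non-addressable parent should not hand back a mutable view. A rough sketch of the underlying reflect/unsafe trick, using a hypothetical struct rather than the library's own types:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type payload struct{ secret string }

func main() {
	p := payload{secret: "hidden"}
	v := reflect.ValueOf(&p).Elem() // addressable parent, as makeAddressable guarantees
	f, _ := v.Type().FieldByName("secret")

	// Re-point at the unexported field with read-write access, the same
	// expression retrieveUnexportedField uses.
	fv := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
	fmt.Println(fv.Interface()) // hidden

	// When the original parent was not addressable (addr == false), the
	// library now returns a shallow copy, which cannot be used to mutate
	// the original value.
	copyVal := reflect.ValueOf(fv.Interface())
	fmt.Println(copyVal.CanSet()) // false
}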
+ // To ensure flexibility in changing the algorithm in the future, + // introduce some degree of deliberate instability. + // This is achieved by fiddling the zigzag iterator to start searching + // the graph starting from the bottom-right versus than the top-left. + // The result may differ depending on the starting search location, + // but still produces a valid edit script. + zigzagInit := randInt // either 0 or 1 + if flags.Deterministic { + zigzagInit = 0 + } + // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { break } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..8228e7d51 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) 
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c4796..e9e384a1c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2a..b50c17ec7 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. 
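TypeString above mirrors reflect.Type.String but can fully qualify named types with their import path. Since internal/value cannot be imported from outside go-cmp, the following is only an illustrative sketch of the expected outputs, written as if inside that package with a hypothetical Config type:

// Illustrative only; Config stands in for any named type in package value.
type Config struct{ Timeout int }

func demoTypeString() {
	fmt.Println(TypeString(reflect.TypeOf(map[string][]*Config{}), false))
	// map[string][]*value.Config
	fmt.Println(TypeString(reflect.TypeOf(Config{}), true))
	// "github.com/google/go-cmp/cmp/internal/value".Config
}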
+func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index abbd2a63b..4b0407a7f 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" var name string if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } } else { // Unnamed type with unexported fields. Derive PkgPath from field. var pkgPath string diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 509d6b852..603dbb002 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -177,7 +177,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressible) field reflect.StructField // Field information } @@ -189,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf29993..aafcb3635 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eede..9e2180964 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -11,14 +11,6 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? 
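The options.go change above nudges users toward cmpopts.EquateErrors when the unexported fields belong to an error implementation. A minimal illustration with hypothetical error values:

package main

import (
	"errors"
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type result struct{ Err error }

func main() {
	base := errors.New("connection refused")
	got := result{Err: fmt.Errorf("dial backend: %w", base)}
	want := result{Err: base}

	// Concrete error types hide their fields, so plain cmp.Equal cannot
	// compare them; EquateErrors instead checks errors.Is in both directions.
	fmt.Println(cmp.Equal(want, got, cmpopts.EquateErrors())) // true
}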
-// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 @@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. 
assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. 
var name string var formatKey func(reflect.Value) string @@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool @@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..d620c2c20 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
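The Push/PushPair and Pop calls above are how the reporter bounds recursion on cyclic data; the same pattern appears in FormatValue later in this patch. A rough sketch of the shape, as if written inside package cmp (formatElem is hypothetical):

// Sketch only: guard a recursive formatting step against pointer cycles.
func formatPtrSketch(v reflect.Value, ptrs *pointerReferences) textNode {
	ptrRef, visited := ptrs.Push(v)
	if visited {
		// v was already pushed higher up the tree; emit a short reference
		// placeholder instead of recursing forever.
		return makeLeafReference(ptrRef, false)
	}
	defer ptrs.Pop()
	out := formatElem(v.Elem()) // hypothetical recursion into the pointee
	return wrapTrunkReference(ptrRef, false, out)
}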
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b6289..786f67126 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -5,13 +5,14 @@ package cmp import ( + "bytes" "fmt" "reflect" "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) @@ -20,14 +21,22 @@ type formatValueOptions struct { // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. 
+ LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +53,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +69,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. 
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - switch v := v.Interface().(type) { - case error: - return textLine("e" + formatString(v.Error())) - case fmt.Stringer: - return textLine("s" + formatString(v.String())) + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) } } } @@ -102,94 +152,140 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: - return textLine(formatString(v.String())) + return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } } + fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
+ opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) - } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } + defer ptrs.Pop() + skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil @@ -197,19 +293,65 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... 
+ } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +369,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +398,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. 
-func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4c..35315dad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false } switch t := v.Type; t.Kind() { @@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } if isText || isBinary { var numLines, lastLineIdx, maxLineLen int - isBinary = false + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) for i, r := range sx + sy { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { isBinary = true @@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } } isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 } // Format the string into printable records. @@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. @@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). @@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf(string("")) { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf([]byte(nil)) { out = opts.FormatType(t, out) } @@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7b..8b12c05cd 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. 
func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 9d92c11f1..f765a46f9 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID). 
Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 7f9e0c6c0..14bd34072 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) - if err == nil { - *uuid = id + if err != nil { + return err } - return err + *uuid = id + return nil } // MarshalBinary implements encoding.BinaryMarshaler. diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 199a1ac65..463109629 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -17,12 +17,6 @@ import ( // // In most cases, New should be used. func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c9f..c110465db 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,8 +27,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
+func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore new file mode 100644 index 000000000..2233cff9d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore @@ -0,0 +1,201 @@ +#vendor +vendor/ + +# Created by .ignore support plugin (hsz.mobi) +coverage.txt +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### Kate template +# Swap Files # +.*.kate-swp +.swp.* +### SublimeText template +# cache files for sublime text +*.tmlanguage.cache +*.tmPreferences.cache +*.stTheme.cache + +# workspace files are user-specific +*.sublime-workspace + +# project files should be checked into the repository, unless a significant +# proportion of contributors will probably not be using SublimeText +# *.sublime-project + +# sftp configuration file +sftp-config.json +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Xcode template +# Xcode +# +# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore + +## Build generated +build/ +DerivedData/ + +## Various settings +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata/ + +## Other +*.moved-aside +*.xccheckout +*.xcscmblueprint +### Eclipse template + +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development 
Tooling) +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml new file mode 100644 index 000000000..2a845b96a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml @@ -0,0 +1,25 @@ +sudo: false +language: go +# * github.com/grpc/grpc-go still supports go1.6 +# - When we drop support for go1.6 we can remove golang.org/x/net/context +# below as it is part of the Go std library since go1.7 +# * github.com/prometheus/client_golang already requires at least go1.7 since +# September 2017 +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - master + +install: + - go get github.com/prometheus/client_golang/prometheus + - go get google.golang.org/grpc + - go get golang.org/x/net/context + - go get github.com/stretchr/testify +script: + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md new file mode 100644 index 000000000..19a8059e1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04 + +### Added + +* Provide metrics object as `prometheus.Collector`, for conventional metric registration. +* Support non-default/global Prometheus registry. +* Allow configuring counters with `prometheus.CounterOpts`. + +### Changed + +* Remove usage of deprecated `grpc.Code()`. +* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`. + +--- + +This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases). diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE new file mode 100644 index 000000000..b2b065037 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md new file mode 100644 index 000000000..499c58355 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md @@ -0,0 +1,247 @@ +# Go gRPC Interceptors for Prometheus monitoring + +[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus) +[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus) +[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus) +[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge) +[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus) +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) + +[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients. + +A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus). + +## Interceptors + +[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed +by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement +common patterns: auth, logging and... monitoring. + +To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware). + +## Usage + +There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both. + +### Server-side + +```go +import "github.com/grpc-ecosystem/go-grpc-prometheus" +... + // Initialize your gRPC server's interceptor. + myServer := grpc.NewServer( + grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + ) + // Register your gRPC service implementations. + myservice.RegisterMyServiceServer(s.server, &myServiceImpl{}) + // After all your registrations, make sure all of the Prometheus metrics are initialized. + grpc_prometheus.Register(myServer) + // Register Prometheus metrics handler. + http.Handle("/metrics", promhttp.Handler()) +... +``` + +### Client-side + +```go +import "github.com/grpc-ecosystem/go-grpc-prometheus" +... + clientConn, err = grpc.Dial( + address, + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor) + ) + client = pb_testproto.NewTestServiceClient(clientConn) + resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) +... +``` + +# Metrics + +## Labels + +All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. Both of them have mirror-concepts. 
Similarly, all methods
+contain the same rich labels:
+
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
+    the `grpc_service` section name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between them is important, especially for latency measurements.
+
+  - `unary` is single request, single response RPC
+  - `client_stream` is a multi-request, single response RPC
+  - `server_stream` is a single request, multi-response RPC
+  - `bidi_stream` is a multi-request, multi-response RPC
+
+
+Additionally for completed RPCs, the following labels are used:
+
+ * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+   The list of all statuses is too long, but here are some common ones:
+
+   - `OK` - means the RPC was successful
+   - `InvalidArgument` - RPC contained bad values
+   - `Internal` - server-side error not disclosed to the clients
+
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+they are exposed via the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call, it will increment the
+`grpc_server_started_total` counter and start the handling time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default.
To enable them, please call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system, and
+let the aggregations be handled there. The verbosity of the above metrics makes it possible to have that
+flexibility.
Here's a couple of useful monitoring queries: + + +### request inbound rate +```jsoniq +sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service) +``` +For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the +rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note +how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together. + +### unary request error rate +```jsoniq +sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) +``` +For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the +ones that didn't finish with `OK` code. + +### unary request error percentage +```jsoniq +sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) + / +sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service) + * 100.0 +``` +For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that +this is a combination of the two above examples. This is an example of a query you would like to +[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g. +"no more than 1% requests should fail". + +### average response stream size +```jsoniq +sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) + / +sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) +``` +For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all ` +server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows +you to track when clients started to send "wide" queries that ret +Note the divisor is the number of started RPCs, in order to account for in-flight requests. + +### 99%-tile latency of unary requests +```jsoniq +histogram_quantile(0.99, + sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le) +) +``` +For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles) +of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile +estimation will take samples in a rolling `5m` window. When combined with other quantiles +(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system +(e.g. impact of caching). + +### percentage of slow unary queries (>250ms) +```jsoniq +100.0 - ( +sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service) + / +sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service) +) * 100.0 +``` +For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` +seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal) +buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps. +This is an example of a query you would like to alert on in your system for SLA violations, +e.g. "less than 1% of requests are slower than 250ms". + + +## Status + +This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io). 
+ +## License + +`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go new file mode 100644 index 000000000..751a4c72d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go @@ -0,0 +1,39 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for client-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +var ( + // DefaultClientMetrics is the default instance of ClientMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultClientMetrics = NewClientMetrics() + + // UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor() + + // StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor() +) + +func init() { + prom.MustRegister(DefaultClientMetrics.clientStartedCounter) + prom.MustRegister(DefaultClientMetrics.clientHandledCounter) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent) +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of +// RPCs. Histogram metrics can be very expensive for Prometheus to retain and +// query. This function acts on the DefaultClientMetrics variable and the +// default Prometheus metrics registry. +func EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...) + prom.Register(DefaultClientMetrics.clientHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go new file mode 100644 index 000000000..9b476f983 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go @@ -0,0 +1,170 @@ +package grpc_prometheus + +import ( + "io" + + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ClientMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC client. +type ClientMetrics struct { + clientStartedCounter *prom.CounterVec + clientHandledCounter *prom.CounterVec + clientStreamMsgReceived *prom.CounterVec + clientStreamMsgSent *prom.CounterVec + clientHandledHistogramEnabled bool + clientHandledHistogramOpts prom.HistogramOpts + clientHandledHistogram *prom.HistogramVec +} + +// NewClientMetrics returns a ClientMetrics object. Use a new instance of +// ClientMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. 
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics { + opts := counterOptions(counterOpts) + return &ClientMetrics{ + clientStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_started_total", + Help: "Total number of RPCs started on the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + + clientStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledHistogramEnabled: false, + clientHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_client_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + }, + clientHandledHistogram: nil, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) { + m.clientStartedCounter.Describe(ch) + m.clientHandledCounter.Describe(ch) + m.clientStreamMsgReceived.Describe(ch) + m.clientStreamMsgSent.Describe(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ClientMetrics) Collect(ch chan<- prom.Metric) { + m.clientStartedCounter.Collect(ch) + m.clientHandledCounter.Collect(ch) + m.clientStreamMsgReceived.Collect(ch) + m.clientStreamMsgSent.Collect(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Collect(ch) + } +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientHandledHistogramOpts) + } + if !m.clientHandledHistogramEnabled { + m.clientHandledHistogram = prom.NewHistogramVec( + m.clientHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.clientHandledHistogramEnabled = true +} + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + monitor := newClientReporter(m, Unary, method) + monitor.SentMessage() + err := invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + monitor.ReceivedMessage() + } + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + monitor := newClientReporter(m, clientStreamType(desc), method) + clientStream, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return nil, err + } + return &monitoredClientStream{clientStream, monitor}, nil + } +} + +func clientStreamType(desc *grpc.StreamDesc) grpcType { + if desc.ClientStreams && !desc.ServerStreams { + return ClientStream + } else if !desc.ClientStreams && desc.ServerStreams { + return ServerStream + } + return BidiStream +} + +// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters. +type monitoredClientStream struct { + grpc.ClientStream + monitor *clientReporter +} + +func (s *monitoredClientStream) SendMsg(m interface{}) error { + err := s.ClientStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } else if err == io.EOF { + s.monitor.Handled(codes.OK) + } else { + st, _ := status.FromError(err) + s.monitor.Handled(st.Code()) + } + return err +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go new file mode 100644 index 000000000..cbf153229 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type clientReporter struct { + metrics *ClientMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter { + r := &clientReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.clientHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *clientReporter) ReceivedMessage() { + r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) SentMessage() { + r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) Handled(code codes.Code) { + r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.clientHandledHistogramEnabled { + r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile new file mode 100644 index 000000000..74c084223 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile @@ -0,0 +1,16 @@ +SHELL="/bin/bash" + +GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) + +all: vet fmt test + +fmt: + go fmt $(GOFILES_NOVENDOR) + +vet: + go vet $(GOFILES_NOVENDOR) + +test: vet + ./scripts/test_all.sh + +.PHONY: all vet test diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go new file mode 100644 index 000000000..9d51aec98 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go @@ -0,0 +1,41 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +// A CounterOption lets you add options to Counter metrics using With* funcs. +type CounterOption func(*prom.CounterOpts) + +type counterOptions []CounterOption + +func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts { + for _, f := range co { + f(&o) + } + return o +} + +// WithConstLabels allows you to add ConstLabels to Counter metrics. +func WithConstLabels(labels prom.Labels) CounterOption { + return func(o *prom.CounterOpts) { + o.ConstLabels = labels + } +} + +// A HistogramOption lets you add options to Histogram metrics using With* +// funcs. +type HistogramOption func(*prom.HistogramOpts) + +// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. +func WithHistogramBuckets(buckets []float64) HistogramOption { + return func(o *prom.HistogramOpts) { o.Buckets = buckets } +} + +// WithHistogramConstLabels allows you to add custom ConstLabels to +// histograms metrics. 
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption { + return func(o *prom.HistogramOpts) { + o.ConstLabels = labels + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go new file mode 100644 index 000000000..322f99046 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go @@ -0,0 +1,48 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for server-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +var ( + // DefaultServerMetrics is the default instance of ServerMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultServerMetrics = NewServerMetrics() + + // UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor() + + // StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor() +) + +func init() { + prom.MustRegister(DefaultServerMetrics.serverStartedCounter) + prom.MustRegister(DefaultServerMetrics.serverHandledCounter) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent) +} + +// Register takes a gRPC server and pre-initializes all counters to 0. This +// allows for easier monitoring in Prometheus (no missing metrics), and should +// be called *after* all services have been registered with the server. This +// function acts on the DefaultServerMetrics variable. +func Register(server *grpc.Server) { + DefaultServerMetrics.InitializeMetrics(server) +} + +// EnableHandlingTimeHistogram turns on recording of handling time +// of RPCs. Histogram metrics can be very expensive for Prometheus +// to retain and query. This function acts on the DefaultServerMetrics +// variable and the default Prometheus metrics registry. +func EnableHandlingTimeHistogram(opts ...HistogramOption) { + DefaultServerMetrics.EnableHandlingTimeHistogram(opts...) + prom.Register(DefaultServerMetrics.serverHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go new file mode 100644 index 000000000..5b1467e7a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go @@ -0,0 +1,185 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +// ServerMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC server. +type ServerMetrics struct { + serverStartedCounter *prom.CounterVec + serverHandledCounter *prom.CounterVec + serverStreamMsgReceived *prom.CounterVec + serverStreamMsgSent *prom.CounterVec + serverHandledHistogramEnabled bool + serverHandledHistogramOpts prom.HistogramOpts + serverHandledHistogram *prom.HistogramVec +} + +// NewServerMetrics returns a ServerMetrics object. 
Use a new instance of +// ServerMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics { + opts := counterOptions(counterOpts) + return &ServerMetrics{ + serverStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_started_total", + Help: "Total number of RPCs started on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + serverStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_received_total", + Help: "Total number of RPC stream messages received on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledHistogramEnabled: false, + serverHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prom.DefBuckets, + }, + serverHandledHistogram: nil, + } +} + +// EnableHandlingTimeHistogram enables histograms being registered when +// registering the ServerMetrics on a Prometheus registry. Histograms can be +// expensive on Prometheus servers. It takes options to configure histogram +// options such as the defined buckets. +func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.serverHandledHistogramOpts) + } + if !m.serverHandledHistogramEnabled { + m.serverHandledHistogram = prom.NewHistogramVec( + m.serverHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.serverHandledHistogramEnabled = true +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) { + m.serverStartedCounter.Describe(ch) + m.serverHandledCounter.Describe(ch) + m.serverStreamMsgReceived.Describe(ch) + m.serverStreamMsgSent.Describe(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ServerMetrics) Collect(ch chan<- prom.Metric) { + m.serverStartedCounter.Collect(ch) + m.serverHandledCounter.Collect(ch) + m.serverStreamMsgReceived.Collect(ch) + m.serverStreamMsgSent.Collect(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Collect(ch) + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + monitor := newServerReporter(m, Unary, info.FullMethod) + monitor.ReceivedMessage() + resp, err := handler(ctx, req) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + if err == nil { + monitor.SentMessage() + } + return resp, err + } +} + +// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + monitor := newServerReporter(m, streamRPCType(info), info.FullMethod) + err := handler(srv, &monitoredServerStream{ss, monitor}) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// InitializeMetrics initializes all metrics, with their appropriate null +// value, for all gRPC methods registered on a gRPC server. This is useful, to +// ensure that all metrics exist when collecting and querying. +func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) { + serviceInfo := server.GetServiceInfo() + for serviceName, info := range serviceInfo { + for _, mInfo := range info.Methods { + preRegisterMethod(m, serviceName, &mInfo) + } + } +} + +func streamRPCType(info *grpc.StreamServerInfo) grpcType { + if info.IsClientStream && !info.IsServerStream { + return ClientStream + } else if !info.IsClientStream && info.IsServerStream { + return ServerStream + } + return BidiStream +} + +// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters. +type monitoredServerStream struct { + grpc.ServerStream + monitor *serverReporter +} + +func (s *monitoredServerStream) SendMsg(m interface{}) error { + err := s.ServerStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredServerStream) RecvMsg(m interface{}) error { + err := s.ServerStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } + return err +} + +// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. +func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) { + methodName := mInfo.Name + methodType := string(typeFromMethodInfo(mInfo)) + // These are just references (no increments), as just referencing will create the labels but not set values. 
+ metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if metrics.serverHandledHistogramEnabled { + metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range allCodes { + metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go new file mode 100644 index 000000000..aa9db5401 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type serverReporter struct { + metrics *ServerMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter { + r := &serverReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.serverHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *serverReporter) ReceivedMessage() { + r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) SentMessage() { + r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) Handled(code codes.Code) { + r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.serverHandledHistogramEnabled { + r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go new file mode 100644 index 000000000..7987de35f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go @@ -0,0 +1,50 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type grpcType string + +const ( + Unary grpcType = "unary" + ClientStream grpcType = "client_stream" + ServerStream grpcType = "server_stream" + BidiStream grpcType = "bidi_stream" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} + +func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { + if !mInfo.IsClientStream && !mInfo.IsServerStream { + return Unary + } + if mInfo.IsClientStream && !mInfo.IsServerStream { + return ClientStream + } + if !mInfo.IsClientStream && mInfo.IsServerStream { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 000000000..bbc7b897c --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 000000000..e33ee1730 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt new file mode 100644 index 000000000..7723656d5 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go new file mode 100644 index 000000000..1dd1cad64 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -0,0 +1,189 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// acceptSlice is defined to implement sort interface. +type acceptSlice []Accept + +func (slice acceptSlice) Len() int { + return len(slice) +} + +func (slice acceptSlice) Less(i, j int) bool { + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (slice acceptSlice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) + + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") + + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) + + switch { + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } + default: + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + + if len(remainingPart) == 0 { + accept = append(accept, a) + continue + } + + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + 
sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { + continue + } + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp1, 32) + } else { + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + } + + accept = append(accept, a) + } + + sort.Sort(accept) + return accept +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go new file mode 100644 index 000000000..7681877a8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go @@ -0,0 +1,46 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil/promlint" +) + +// CollectAndLint registers the provided Collector with a newly created pedantic +// Registry. It then calls GatherAndLint with that Registry and with the +// provided metricNames. +func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %s", err) + } + return GatherAndLint(reg, metricNames...) +} + +// GatherAndLint gathers all metrics from the provided Gatherer and checks them +// with the linter in the promlint package. If any metricNames are provided, +// only metrics with those names are checked. 
+func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { + got, err := g.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + return promlint.NewWithMetricFamilies(got).Lint() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go new file mode 100644 index 000000000..ec8061706 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -0,0 +1,386 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promlint provides a linter for Prometheus metrics. +package promlint + +import ( + "fmt" + "io" + "regexp" + "sort" + "strings" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + // The linter will read metrics in the Prometheus text format from r and + // then lint it, _and_ it will lint the metrics provided directly as + // MetricFamily proto messages in mfs. Note, however, that the current + // constructor functions New and NewWithMetricFamilies only ever set one + // of them. + r io.Reader + mfs []*dto.MetricFamily +} + +// A Problem is an issue detected by a Linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. +func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} + +// New creates a new Linter that reads an input stream of Prometheus metrics in +// the Prometheus text exposition format. +func New(r io.Reader) *Linter { + return &Linter{ + r: r, + } +} + +// NewWithMetricFamilies creates a new Linter that reads from a slice of +// MetricFamily protobuf messages. +func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { + return &Linter{ + mfs: mfs, + } +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + var problems []Problem + + if l.r != nil { + d := expfmt.NewDecoder(l.r, expfmt.FmtText) + + mf := &dto.MetricFamily{} + for { + if err := d.Decode(mf); err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + problems = append(problems, lint(mf)...) + } + } + for _, mf := range l.mfs { + problems = append(problems, lint(mf)...) + } + + // Ensure deterministic output. 
+ sort.SliceStable(problems, func(i, j int) bool { + if problems[i].Metric == problems[j].Metric { + return problems[i].Text < problems[j].Text + } + return problems[i].Metric < problems[j].Metric + }) + + return problems, nil +} + +// lint is the entry point for linting a single metric. +func lint(mf *dto.MetricFamily) []Problem { + fns := []func(mf *dto.MetricFamily) []Problem{ + lintHelp, + lintMetricUnits, + lintCounter, + lintHistogramSummaryReserved, + lintMetricTypeInName, + lintReservedChars, + lintCamelCase, + lintUnitAbbreviations, + } + + var problems []Problem + for _, fn := range fns { + problems = append(problems, fn(mf)...) + } + + // TODO(mdlayher): lint rules for specific metrics types. + return problems +} + +// lintHelp detects issues related to the help text for a metric. +func lintHelp(mf *dto.MetricFamily) []Problem { + var problems []Problem + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, newProblem(mf, "no help text")) + } + + return problems +} + +// lintMetricUnits detects issues with metric unit names. +func lintMetricUnits(mf *dto.MetricFamily) []Problem { + var problems []Problem + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. + if unit == base { + return nil + } + + problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) + + return problems +} + +// lintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. +func lintCounter(mf *dto.MetricFamily) []Problem { + var problems []Problem + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) + } + + return problems +} + +// lintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { + // These rules do not apply to untyped metrics. 
+ t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []Problem + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} + +// lintMetricTypeInName detects when metric types are included in the metric name. +func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + + for i, t := range dto.MetricType_name { + if i == int32(dto.MetricType_UNTYPED) { + continue + } + + typename := strings.ToLower(t) + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) + } + } + return problems +} + +// lintReservedChars detects colons in metric names. +func lintReservedChars(mf *dto.MetricFamily) []Problem { + var problems []Problem + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, newProblem(mf, "metric names should not contain ':'")) + } + return problems +} + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// lintCamelCase detects metric names and label names written in camelCase. +func lintCamelCase(mf *dto.MetricFamily) []Problem { + var problems []Problem + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// lintUnitAbbreviations detects abbreviated units in the metric name. +func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) + } + } + return problems +} + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit string, base string, ok bool) { + ss := strings.Split(m, "_") + + for unit, base := range units { + // Also check for "no prefix". 
+ for _, p := range append(unitPrefixes, "") { + for _, s := range ss { + // Attempt to explicitly match a known unit with a known prefix, + // as some words may look like "units" when matching suffix. + // + // As an example, "thermometers" should not match "meters", but + // "kilometers" should. + if s == p+unit { + return p + unit, base, true + } + } + } + } + + return "", "", false +} + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. + "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go new file mode 100644 index 000000000..9af60ce1d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -0,0 +1,230 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil provides helpers to test code using the prometheus package +// of client_golang. +// +// While writing unit tests to verify correct instrumentation of your code, it's +// a common mistake to mostly test the instrumentation library instead of your +// own code. Rather than verifying that a prometheus.Counter's value has changed +// as expected or that it shows up in the exposition after registration, it is +// in general more robust and more faithful to the concept of unit tests to use +// mock implementations of the prometheus.Counter and prometheus.Registerer +// interfaces that simply assert that the Add or Register methods have been +// called with the expected arguments. 
However, this might be overkill in simple +// scenarios. The ToFloat64 function is provided for simple inspection of a +// single-value metric, but it has to be used with caution. +// +// End-to-end tests to verify all or larger parts of the metrics exposition can +// be implemented with the CollectAndCompare or GatherAndCompare functions. The +// most appropriate use is not so much testing instrumentation of your code, but +// testing custom prometheus.Collector implementations and in particular whole +// exporters, i.e. programs that retrieve telemetry data from a 3rd party source +// and convert it into Prometheus metrics. +// +// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect +// metrics that have issues with their name, type, or metadata without being +// necessarily invalid, e.g. a counter with a name missing the “_total” suffix. +package testutil + +import ( + "bytes" + "fmt" + "io" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/internal" +) + +// ToFloat64 collects all Metrics from the provided Collector. It expects that +// this results in exactly one Metric being collected, which must be a Gauge, +// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns +// the value of the collected Metric. +// +// The Collector provided is typically a simple instance of Gauge or Counter, or +// – less commonly – a GaugeVec or CounterVec with exactly one element. But any +// Collector fulfilling the prerequisites described above will do. +// +// Use this function with caution. It is computationally very expensive and thus +// not suited at all to read values from Metrics in regular code. This is really +// only for testing purposes, and even for testing, other approaches are often +// more appropriate (see this package's documentation). +// +// A clear anti-pattern would be to use a metric type from the prometheus +// package to track values that are also needed for something else than the +// exposition of Prometheus metrics. For example, you would like to track the +// number of items in a queue because your code should reject queuing further +// items if a certain limit is reached. It is tempting to track the number of +// items in a prometheus.Gauge, as it is then easily available as a metric for +// exposition, too. However, then you would need to call ToFloat64 in your +// regular code, potentially quite often. The recommended way is to track the +// number of items conventionally (in the way you would have done it without +// considering Prometheus metrics) and then expose the number with a +// prometheus.GaugeFunc. +func ToFloat64(c prometheus.Collector) float64 { + var ( + m prometheus.Metric + mCount int + mChan = make(chan prometheus.Metric) + done = make(chan struct{}) + ) + + go func() { + for m = range mChan { + mCount++ + } + close(done) + }() + + c.Collect(mChan) + close(mChan) + <-done + + if mCount != 1 { + panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount)) + } + + pb := &dto.Metric{} + m.Write(pb) + if pb.Gauge != nil { + return pb.Gauge.GetValue() + } + if pb.Counter != nil { + return pb.Counter.GetValue() + } + if pb.Untyped != nil { + return pb.Untyped.GetValue() + } + panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb)) +} + +// CollectAndCount registers the provided Collector with a newly created +// pedantic Registry. 
It then calls GatherAndCount with that Registry and with +// the provided metricNames. In the unlikely case that the registration or the +// gathering fails, this function panics. (This is inconsistent with the other +// CollectAnd… functions in this package and has historical reasons. Changing +// the function signature would be a breaking change and will therefore only +// happen with the next major version bump.) +func CollectAndCount(c prometheus.Collector, metricNames ...string) int { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + panic(fmt.Errorf("registering collector failed: %s", err)) + } + result, err := GatherAndCount(reg, metricNames...) + if err != nil { + panic(err) + } + return result +} + +// GatherAndCount gathers all metrics from the provided Gatherer and counts +// them. It returns the number of metric children in all gathered metric +// families together. If any metricNames are provided, only metrics with those +// names are counted. +func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { + got, err := g.Gather() + if err != nil { + return 0, fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + + result := 0 + for _, mf := range got { + result += len(mf.GetMetric()) + } + return result, nil +} + +// CollectAndCompare registers the provided Collector with a newly created +// pedantic Registry. It then calls GatherAndCompare with that Registry and with +// the provided metricNames. +func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return fmt.Errorf("registering collector failed: %s", err) + } + return GatherAndCompare(reg, expected, metricNames...) +} + +// GatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { + got, err := g.Gather() + if err != nil { + return fmt.Errorf("gathering metrics failed: %s", err) + } + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + var tp expfmt.TextParser + wantRaw, err := tp.TextToMetricFamilies(expected) + if err != nil { + return fmt.Errorf("parsing expected metrics failed: %s", err) + } + want := internal.NormalizeMetricFamilies(wantRaw) + + return compare(got, want) +} + +// compare encodes both provided slices of metric families into the text format, +// compares their string message, and returns an error if they do not match. +// The error contains the encoded text of both the desired and the actual +// result. 
+func compare(got, want []*dto.MetricFamily) error { + var gotBuf, wantBuf bytes.Buffer + enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) + for _, mf := range got { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding gathered metrics failed: %s", err) + } + } + enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) + for _, mf := range want { + if err := enc.Encode(mf); err != nil { + return fmt.Errorf("encoding expected metrics failed: %s", err) + } + } + + if wantBuf.String() != gotBuf.String() { + return fmt.Errorf(` +metric output does not match expectation; want: + +%s +got: + +%s`, wantBuf.String(), gotBuf.String()) + + } + return nil +} + +func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { + var filtered []*dto.MetricFamily + for _, m := range metrics { + for _, name := range names { + if m.GetName() == name { + filtered = append(filtered, m) + break + } + } + } + return filtered +} diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9a1aff412 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 31d42f712..b9fb589aa 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -407,6 +407,50 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + // firstNonEmptyLine advances the scanner to the first non-empty line // and returns the contents of that line func firstNonEmptyLine(scanner *bufio.Scanner) string { diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go similarity index 97% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm.go rename to vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 835550770..44b590ed3 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -12,6 +12,7 @@ // limitations under the License. 
// +build linux +// +build arm arm64 package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go deleted file mode 100644 index 22d93f8ef..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go similarity index 94% rename from vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go rename to vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 22d93f8ef..91e272573 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -12,6 +12,7 @@ // limitations under the License. // +build linux +// +build mips mipsle mips64 mips64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go similarity index 82% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm64.go rename to vendor/github.com/prometheus/procfs/cpuinfo_others.go index 4f5d172a3..95b5b4ec4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -12,8 +12,8 @@ // limitations under the License. // +build linux -// +build arm64 +// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs -var parseCPUInfo = parseCPUInfoARM +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go deleted file mode 100644 index 64aee9c63..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go similarity index 96% rename from vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go rename to vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 64aee9c63..6068bd571 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -12,6 +12,7 @@ // limitations under the License. // +build linux +// +build ppc64 ppc64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go similarity index 100% rename from vendor/github.com/prometheus/procfs/cpuinfo_default.go rename to vendor/github.com/prometheus/procfs/cpuinfo_x86.go diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 868c8573d..12494d742 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -467,7 +467,7 @@ Pid: 26231 PPid: 1 TracerPid: 0 Uid: 1000 1000 1000 0 -Gid: 0 0 0 0 +Gid: 1001 1001 1001 0 FDSize: 128 Groups: NStgid: 1 @@ -1966,7 +1966,7 @@ Lines: 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/mdstat -Lines: 56 +Lines: 60 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) @@ -1989,6 +1989,10 @@ md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk @@ -3754,6 +3758,73 @@ Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/name +Lines: 1 +package-10 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/thermal Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index beefdf02e..da3a941d6 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -25,7 +25,7 @@ import ( type KernelRandom struct { // EntropyAvaliable gives the available entropy, in bits. EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bytes. + // PoolSize gives the size of the entropy pool, in bits. 
PoolSize *uint64 // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. URandomMinReseedSeconds *uint64 diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 3e9362a94..98e37aa8c 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -107,11 +107,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { syncedBlocks := size recovering := strings.Contains(lines[syncLineIdx], "recovery") resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") // Append recovery and resyncing state info. - if recovering || resyncing { + if recovering || resyncing || checking { if recovering { state = "recovering" + } else if checking { + state = "checking" } else { state = "resyncing" } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c58346d91..6edd8333b 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -72,8 +72,10 @@ type ProcStatus struct { // Number of involuntary context switches. NonVoluntaryCtxtSwitches uint64 - // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string } // NewStatus returns the current status information of the process. @@ -119,6 +121,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.Name = vString case "Uid": copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/go.etcd.io/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.etcd.io/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/vendor/go.etcd.io/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go new file mode 100644 index 000000000..7e038df01 --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go @@ -0,0 +1,977 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: auth.proto + +/* + Package authpb is a generated protocol buffer package. 
+ + It is generated from these files: + auth.proto + + It has these top-level messages: + UserAddOptions + User + Permission + Role +*/ +package authpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var Permission_Type_name = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} +var Permission_Type_value = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(Permission_Type_name, int32(x)) +} +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2, 0} } + +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` +} + +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + +// User is a single entry in the bucket authUsers +type User struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } + +// Permission is a single entity +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } + +// Role is a single entry in the bucket authRoles +type Role struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{3} } + +func init() { + proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") + 
proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoPassword { + dAtA[i] = 0x8 + i++ + if m.NoPassword { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Options != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.KeyPermission) > 0 { + for _, msg := range m.KeyPermission { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *UserAddOptions) Size() (n int) { + var l int + _ = l + if m.NoPassword { + n += 2 + } + return n +} + +func (m *User) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = 
len(m.Password) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Permission) Size() (n int) { + var l int + _ = l + if m.PermType != 0 { + n += 1 + sovAuth(uint64(m.PermType)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.KeyPermission) > 0 { + for _, e := range m.KeyPermission { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func sovAuth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPassword = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) + if m.Password == nil { + m.Password = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) + } + m.PermType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PermType |= (Permission_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPermission = append(m.KeyPermission, &Permission{}) + if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAuth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAuth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 
0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.proto b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto new file mode 100644 index 000000000..8f82b7cf1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package authpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message UserAddOptions { + bool no_password = 1; +}; + +// User is a single entry in the bucket authUsers +message User { + bytes name = 1; + bytes password = 2; + repeated string roles = 3; + UserAddOptions options = 4; +} + +// Permission is a single entity +message Permission { + enum Type { + READ = 0; + WRITE = 1; + READWRITE = 2; + } + Type permType = 1; + + bytes key = 2; + bytes range_end = 3; +} + +// Role is a single entry in the bucket authRoles +message Role { + bytes name = 1; + + repeated Permission keyPermission = 2; +} diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md new file mode 100644 index 000000000..6c6fe7c67 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. 
+ +## Install + +```bash +go get go.etcd.io/etcd/clientv3 +``` + +## Get started + +Create client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, +pass `context.WithTimeout` to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). + +## Error Handling + +etcd client returns 2 types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). + +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Request size limit + +Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go new file mode 100644 index 000000000..c954f1bf4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/auth.go @@ -0,0 +1,242 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + "fmt" + "strings" + + "go.etcd.io/etcd/auth/authpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type UserAddOptions authpb.UserAddOptions + +type Auth interface { + // AuthEnable enables auth of an etcd cluster. + AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + + // AuthDisable disables auth of an etcd cluster. + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + + // UserAdd adds a new user to an etcd cluster. + UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + + // UserAddWithOptions adds a new user to an etcd cluster with some options. + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + + // UserDelete deletes a user from an etcd cluster. + UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) + + // UserChangePassword changes a password of a user. + UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) + + // UserGrantRole grants a role to a user. + UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) + + // UserGet gets a detailed information of a user. + UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) + + // UserList gets a list of all users. + UserList(ctx context.Context) (*AuthUserListResponse, error) + + // UserRevokeRole revokes a role of a user. + UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) + + // RoleAdd adds a new role to an etcd cluster. + RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) + + // RoleGrantPermission grants a permission to a role. + RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) + + // RoleGet gets a detailed information of a role. + RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + + // RoleList gets a list of all roles. + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + + // RoleRevokePermission revokes a permission from a role. + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + + // RoleDelete deletes a role. 
+ RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type authClient struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &authClient{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) + return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
+ return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} + +type authenticator struct { + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authenticator) close() { + auth.conn.Close() +} + +func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) { + conn, err := grpc.DialContext(ctx, target, opts...) + if err != nil { + return nil, err + } + + api := &authenticator{ + conn: conn, + remote: pb.NewAuthClient(conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go new file mode 100644 index 000000000..d02a7eec7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go @@ -0,0 +1,293 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
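The Auth interface above (auth.go is complete at this point) is the user-facing surface for etcd's role-based access control. A minimal sketch of how a caller might use it, assuming an already-constructed *clientv3.Client; the role, user, and key-prefix names are illustrative:

package etcdauthexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// setupAppAuth is a hypothetical helper: it creates a role limited to the
// "app/" key prefix and a user holding that role. It is meant to run before
// cli.AuthEnable(ctx) turns authentication on.
func setupAppAuth(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.RoleAdd(ctx, "app-rw"); err != nil {
		return err
	}
	// The range ["app/", "app0") covers every key with the "app/" prefix.
	if _, err := cli.RoleGrantPermission(ctx, "app-rw", "app/", "app0",
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		return err
	}
	if _, err := cli.UserAdd(ctx, "app-user", "app-password"); err != nil {
		return err
	}
	_, err := cli.UserGrantRole(ctx, "app-user", "app-rw")
	return err
}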
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package balancer implements client balancer. +package balancer + +import ( + "strconv" + "sync" + "time" + + "go.etcd.io/etcd/clientv3/balancer/connectivity" + "go.etcd.io/etcd/clientv3/balancer/picker" + + "go.uber.org/zap" + "google.golang.org/grpc/balancer" + grpcconnectivity "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // register DNS resolver + _ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver +) + +// Config defines balancer configurations. +type Config struct { + // Policy configures balancer policy. + Policy picker.Policy + + // Picker implements gRPC picker. + // Leave empty if "Policy" field is not custom. + // TODO: currently custom policy is not supported. + // Picker picker.Picker + + // Name defines an additional name for balancer. + // Useful for balancer testing to avoid register conflicts. + // If empty, defaults to policy name. + Name string + + // Logger configures balancer logging. + // If nil, logs are discarded. + Logger *zap.Logger +} + +// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it +// must be invoked at initialization time. +func RegisterBuilder(cfg Config) { + bb := &builder{cfg} + balancer.Register(bb) + + bb.cfg.Logger.Debug( + "registered balancer", + zap.String("policy", bb.cfg.Policy.String()), + zap.String("name", bb.cfg.Name), + ) +} + +type builder struct { + cfg Config +} + +// Build is called initially when creating "ccBalancerWrapper". +// "grpc.Dial" is called to this client connection. +// Then, resolved addresses will be handled via "HandleResolvedAddrs". +func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bb := &baseBalancer{ + id: strconv.FormatInt(time.Now().UnixNano(), 36), + policy: b.cfg.Policy, + name: b.cfg.Name, + lg: b.cfg.Logger, + + addrToSc: make(map[resolver.Address]balancer.SubConn), + scToAddr: make(map[balancer.SubConn]resolver.Address), + scToSt: make(map[balancer.SubConn]grpcconnectivity.State), + + currentConn: nil, + connectivityRecorder: connectivity.New(b.cfg.Logger), + + // initialize picker always returns "ErrNoSubConnAvailable" + picker: picker.NewErr(balancer.ErrNoSubConnAvailable), + } + + // TODO: support multiple connections + bb.mu.Lock() + bb.currentConn = cc + bb.mu.Unlock() + + bb.lg.Info( + "built balancer", + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + zap.String("resolver-target", cc.Target()), + ) + return bb +} + +// Name implements "grpc/balancer.Builder" interface. +func (b *builder) Name() string { return b.cfg.Name } + +// Balancer defines client balancer interface. +type Balancer interface { + // Balancer is called on specified client connection. Client initiates gRPC + // connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved + // addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs". + // For each resolved address, balancer calls "balancer.ClientConn.NewSubConn". 
+ // "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state + // changes, thus requires failover logic in this method. + balancer.Balancer + + // Picker calls "Pick" for every client request. + picker.Picker +} + +type baseBalancer struct { + id string + policy picker.Policy + name string + lg *zap.Logger + + mu sync.RWMutex + + addrToSc map[resolver.Address]balancer.SubConn + scToAddr map[balancer.SubConn]resolver.Address + scToSt map[balancer.SubConn]grpcconnectivity.State + + currentConn balancer.ClientConn + connectivityRecorder connectivity.Recorder + + picker picker.Picker +} + +// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface. +// gRPC sends initial or updated resolved addresses from "Build". +func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err)) + return + } + bb.lg.Info("resolved", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.Strings("addresses", addrsToStrings(addrs)), + ) + + bb.mu.Lock() + defer bb.mu.Unlock() + + resolved := make(map[resolver.Address]struct{}) + for _, addr := range addrs { + resolved[addr] = struct{}{} + if _, ok := bb.addrToSc[addr]; !ok { + sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr)) + continue + } + bb.lg.Info("created subconn", zap.String("address", addr.Addr)) + bb.addrToSc[addr] = sc + bb.scToAddr[sc] = addr + bb.scToSt[sc] = grpcconnectivity.Idle + sc.Connect() + } + } + + for addr, sc := range bb.addrToSc { + if _, ok := resolved[addr]; !ok { + // was removed by resolver or failed to create subconn + bb.currentConn.RemoveSubConn(sc) + delete(bb.addrToSc, addr) + + bb.lg.Info( + "removed subconn", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("address", addr.Addr), + zap.String("subconn", scToString(sc)), + ) + + // Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + // (DO NOT) delete(bb.scToAddr, sc) + // (DO NOT) delete(bb.scToSt, sc) + } + } +} + +// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface. +func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) { + bb.mu.Lock() + defer bb.mu.Unlock() + + old, ok := bb.scToSt[sc] + if !ok { + bb.lg.Warn( + "state change for an unknown subconn", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("subconn", scToString(sc)), + zap.Int("subconn-size", len(bb.scToAddr)), + zap.String("state", s.String()), + ) + return + } + + bb.lg.Info( + "state changed", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.Bool("connected", s == grpcconnectivity.Ready), + zap.String("subconn", scToString(sc)), + zap.Int("subconn-size", len(bb.scToAddr)), + zap.String("address", bb.scToAddr[sc].Addr), + zap.String("old-state", old.String()), + zap.String("new-state", s.String()), + ) + + bb.scToSt[sc] = s + switch s { + case grpcconnectivity.Idle: + sc.Connect() + case grpcconnectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scToSt. 
Remove state for this sc here. + delete(bb.scToAddr, sc) + delete(bb.scToSt, sc) + } + + oldAggrState := bb.connectivityRecorder.GetCurrentState() + bb.connectivityRecorder.RecordTransition(old, s) + + // Update balancer picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) || + (bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) { + bb.updatePicker() + } + + bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker) +} + +func (bb *baseBalancer) updatePicker() { + if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure { + bb.picker = picker.NewErr(balancer.ErrTransientFailure) + bb.lg.Info( + "updated picker to transient error picker", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + ) + return + } + + // only pass ready subconns to picker + scToAddr := make(map[balancer.SubConn]resolver.Address) + for addr, sc := range bb.addrToSc { + if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready { + scToAddr[sc] = addr + } + } + + bb.picker = picker.New(picker.Config{ + Policy: bb.policy, + Logger: bb.lg, + SubConnToResolverAddress: scToAddr, + }) + bb.lg.Info( + "updated picker", + zap.String("picker", bb.picker.String()), + zap.String("balancer-id", bb.id), + zap.String("policy", bb.policy.String()), + zap.Strings("subconn-ready", scsToStrings(scToAddr)), + zap.Int("subconn-size", len(scToAddr)), + ) +} + +// Close implements "grpc/balancer.Balancer" interface. +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (bb *baseBalancer) Close() { + // TODO +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go new file mode 100644 index 000000000..4c4ad363a --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go @@ -0,0 +1,93 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package connectivity implements client connectivity operations. +package connectivity + +import ( + "sync" + + "go.uber.org/zap" + "google.golang.org/grpc/connectivity" +) + +// Recorder records gRPC connectivity. +type Recorder interface { + GetCurrentState() connectivity.State + RecordTransition(oldState, newState connectivity.State) +} + +// New returns a new Recorder. 
+func New(lg *zap.Logger) Recorder { + return &recorder{lg: lg} +} + +// recorder takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go +type recorder struct { + lg *zap.Logger + + mu sync.RWMutex + + cur connectivity.State + + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +func (rc *recorder) GetCurrentState() (state connectivity.State) { + rc.mu.RLock() + defer rc.mu.RUnlock() + return rc.cur +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +// +// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go +func (rc *recorder) RecordTransition(oldState, newState connectivity.State) { + rc.mu.Lock() + defer rc.mu.Unlock() + + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + rc.numReady += updateVal + case connectivity.Connecting: + rc.numConnecting += updateVal + case connectivity.TransientFailure: + rc.numTransientFailure += updateVal + default: + rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String())) + } + } + + switch { // must be exclusive, no overlap + case rc.numReady > 0: + rc.cur = connectivity.Ready + case rc.numConnecting > 0: + rc.cur = connectivity.Connecting + default: + rc.cur = connectivity.TransientFailure + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go new file mode 100644 index 000000000..35dabf553 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package picker defines/implements client balancer picker policy. +package picker diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go new file mode 100644 index 000000000..f4b941d65 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go @@ -0,0 +1,39 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
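The aggregation rule documented on RecordTransition (one Ready wins, otherwise Connecting, otherwise TransientFailure) can be exercised on its own; a tiny sketch with a no-op logger and hand-fed state transitions, nothing here talks to etcd:

package main

import (
	"fmt"

	balancerconn "go.etcd.io/etcd/clientv3/balancer/connectivity"
	"go.uber.org/zap"
	"google.golang.org/grpc/connectivity"
)

func main() {
	rec := balancerconn.New(zap.NewNop())

	// One sub-connection moves Idle -> Connecting: the aggregate is Connecting.
	rec.RecordTransition(connectivity.Idle, connectivity.Connecting)
	fmt.Println(rec.GetCurrentState()) // CONNECTING

	// The same sub-connection moves Connecting -> Ready: one Ready is enough,
	// so the aggregate becomes Ready.
	rec.RecordTransition(connectivity.Connecting, connectivity.Ready)
	fmt.Println(rec.GetCurrentState()) // READY
}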
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "context" + + "google.golang.org/grpc/balancer" +) + +// NewErr returns a picker that always returns err on "Pick". +func NewErr(err error) Picker { + return &errPicker{p: Error, err: err} +} + +type errPicker struct { + p Policy + err error +} + +func (ep *errPicker) String() string { + return ep.p.String() +} + +func (ep *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, ep.err +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go new file mode 100644 index 000000000..bd1a5d25e --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go @@ -0,0 +1,91 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "fmt" + + "go.uber.org/zap" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// Picker defines balancer Picker methods. +type Picker interface { + balancer.Picker + String() string +} + +// Config defines picker configuration. +type Config struct { + // Policy specifies etcd clientv3's built in balancer policy. + Policy Policy + + // Logger defines picker logging object. + Logger *zap.Logger + + // SubConnToResolverAddress maps each gRPC sub-connection to an address. + // Basically, it is a list of addresses that the Picker can pick from. + SubConnToResolverAddress map[balancer.SubConn]resolver.Address +} + +// Policy defines balancer picker policy. +type Policy uint8 + +const ( + // Error is error picker policy. + Error Policy = iota + + // RoundrobinBalanced balances loads over multiple endpoints + // and implements failover in roundrobin fashion. + RoundrobinBalanced + + // Custom defines custom balancer picker. + // TODO: custom picker is not supported yet. + Custom +) + +func (p Policy) String() string { + switch p { + case Error: + return "picker-error" + + case RoundrobinBalanced: + return "picker-roundrobin-balanced" + + case Custom: + panic("'custom' picker policy is not supported yet") + + default: + panic(fmt.Errorf("invalid balancer picker policy (%d)", p)) + } +} + +// New creates a new Picker. 
+func New(cfg Config) Picker { + switch cfg.Policy { + case Error: + panic("'error' picker policy is not supported here; use 'picker.NewErr'") + + case RoundrobinBalanced: + return newRoundrobinBalanced(cfg) + + case Custom: + panic("'custom' picker policy is not supported yet") + + default: + panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy)) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go new file mode 100644 index 000000000..e3971ecc4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go @@ -0,0 +1,95 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package picker + +import ( + "context" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// newRoundrobinBalanced returns a new roundrobin balanced picker. +func newRoundrobinBalanced(cfg Config) Picker { + scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress)) + for sc := range cfg.SubConnToResolverAddress { + scs = append(scs, sc) + } + return &rrBalanced{ + p: RoundrobinBalanced, + lg: cfg.Logger, + scs: scs, + scToAddr: cfg.SubConnToResolverAddress, + } +} + +type rrBalanced struct { + p Policy + + lg *zap.Logger + + mu sync.RWMutex + next int + scs []balancer.SubConn + scToAddr map[balancer.SubConn]resolver.Address +} + +func (rb *rrBalanced) String() string { return rb.p.String() } + +// Pick is called for every client request. +func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + rb.mu.RLock() + n := len(rb.scs) + rb.mu.RUnlock() + if n == 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + rb.mu.Lock() + cur := rb.next + sc := rb.scs[cur] + picked := rb.scToAddr[sc].Addr + rb.next = (rb.next + 1) % len(rb.scs) + rb.mu.Unlock() + + rb.lg.Debug( + "picked", + zap.String("picker", rb.p.String()), + zap.String("address", picked), + zap.Int("subconn-index", cur), + zap.Int("subconn-size", n), + ) + + doneFunc := func(info balancer.DoneInfo) { + // TODO: error handling? + fss := []zapcore.Field{ + zap.Error(info.Err), + zap.String("picker", rb.p.String()), + zap.String("address", picked), + zap.Bool("success", info.Err == nil), + zap.Bool("bytes-sent", info.BytesSent), + zap.Bool("bytes-received", info.BytesReceived), + } + if info.Err == nil { + rb.lg.Debug("balancer done", fss...) + } else { + rb.lg.Warn("balancer failed", fss...) 
+ } + } + return sc, doneFunc, nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go new file mode 100644 index 000000000..2837bd418 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go @@ -0,0 +1,247 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package endpoint resolves etcd entpoints using grpc targets of the form 'endpoint:///'. +package endpoint + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + + "google.golang.org/grpc/resolver" +) + +const scheme = "endpoint" + +var ( + targetPrefix = fmt.Sprintf("%s://", scheme) + + bldr *builder +) + +func init() { + bldr = &builder{ + resolverGroups: make(map[string]*ResolverGroup), + } + resolver.Register(bldr) +} + +type builder struct { + mu sync.RWMutex + resolverGroups map[string]*ResolverGroup +} + +// NewResolverGroup creates a new ResolverGroup with the given id. +func NewResolverGroup(id string) (*ResolverGroup, error) { + return bldr.newResolverGroup(id) +} + +// ResolverGroup keeps all endpoints of resolvers using a common endpoint:/// target +// up-to-date. +type ResolverGroup struct { + mu sync.RWMutex + id string + endpoints []string + resolvers []*Resolver +} + +func (e *ResolverGroup) addResolver(r *Resolver) { + e.mu.Lock() + addrs := epsToAddrs(e.endpoints...) + e.resolvers = append(e.resolvers, r) + e.mu.Unlock() + r.cc.NewAddress(addrs) +} + +func (e *ResolverGroup) removeResolver(r *Resolver) { + e.mu.Lock() + for i, er := range e.resolvers { + if er == r { + e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...) + break + } + } + e.mu.Unlock() +} + +// SetEndpoints updates the endpoints for ResolverGroup. All registered resolver are updated +// immediately with the new endpoints. +func (e *ResolverGroup) SetEndpoints(endpoints []string) { + addrs := epsToAddrs(endpoints...) + e.mu.Lock() + e.endpoints = endpoints + for _, r := range e.resolvers { + r.cc.NewAddress(addrs) + } + e.mu.Unlock() +} + +// Target constructs a endpoint target using the endpoint id of the ResolverGroup. +func (e *ResolverGroup) Target(endpoint string) string { + return Target(e.id, endpoint) +} + +// Target constructs a endpoint resolver target. +func Target(id, endpoint string) string { + return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint) +} + +// IsTarget checks if a given target string in an endpoint resolver target. +func IsTarget(target string) bool { + return strings.HasPrefix(target, "endpoint://") +} + +func (e *ResolverGroup) Close() { + bldr.close(e.id) +} + +// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target. 
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if len(target.Authority) < 1 { + return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to") + } + id := target.Authority + es, err := b.getResolverGroup(id) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + r := &Resolver{ + endpointID: id, + cc: cc, + } + es.addResolver(r) + return r, nil +} + +func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) { + b.mu.RLock() + _, ok := b.resolverGroups[id] + b.mu.RUnlock() + if ok { + return nil, fmt.Errorf("Endpoint already exists for id: %s", id) + } + + es := &ResolverGroup{id: id} + b.mu.Lock() + b.resolverGroups[id] = es + b.mu.Unlock() + return es, nil +} + +func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) { + b.mu.RLock() + es, ok := b.resolverGroups[id] + b.mu.RUnlock() + if !ok { + return nil, fmt.Errorf("ResolverGroup not found for id: %s", id) + } + return es, nil +} + +func (b *builder) close(id string) { + b.mu.Lock() + delete(b.resolverGroups, id) + b.mu.Unlock() +} + +func (b *builder) Scheme() string { + return scheme +} + +// Resolver provides a resolver for a single etcd cluster, identified by name. +type Resolver struct { + endpointID string + cc resolver.ClientConn + sync.RWMutex +} + +// TODO: use balancer.epsToAddrs +func epsToAddrs(eps ...string) (addrs []resolver.Address) { + addrs = make([]resolver.Address, 0, len(eps)) + for _, ep := range eps { + addrs = append(addrs, resolver.Address{Addr: ep}) + } + return addrs +} + +func (*Resolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (r *Resolver) Close() { + es, err := bldr.getResolverGroup(r.endpointID) + if err != nil { + return + } + es.removeResolver(r) +} + +// ParseEndpoint endpoint parses an endpoint of the form +// (http|https)://*|(unix|unixs)://) +// and returns a protocol ('tcp' or 'unix'), +// host (or filepath if a unix socket), +// scheme (http, https, unix, unixs). +func ParseEndpoint(endpoint string) (proto string, host string, scheme string) { + proto = "tcp" + host = endpoint + url, uerr := url.Parse(endpoint) + if uerr != nil || !strings.Contains(endpoint, "://") { + return proto, host, scheme + } + scheme = url.Scheme + + // strip scheme:// prefix since grpc dials by host + host = url.Host + switch url.Scheme { + case "http", "https": + case "unix", "unixs": + proto = "unix" + host = url.Host + url.Path + default: + proto, host = "", "" + } + return proto, host, scheme +} + +// ParseTarget parses a endpoint:/// string and returns the parsed id and endpoint. +// If the target is malformed, an error is returned. +func ParseTarget(target string) (string, string, error) { + noPrefix := strings.TrimPrefix(target, targetPrefix) + if noPrefix == target { + return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target) + } + parts := strings.SplitN(noPrefix, "/", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("malformed target, expected %s:///, but got %s", scheme, target) + } + return parts[0], parts[1], nil +} + +// Dialer dials a endpoint using net.Dialer. +// Context cancelation and timeout are supported. 
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + proto, host, _ := ParseEndpoint(dialEp) + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + dialer := &net.Dialer{} + if deadline, ok := ctx.Deadline(); ok { + dialer.Deadline = deadline + } + return dialer.DialContext(ctx, proto, host) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go new file mode 100644 index 000000000..48eb87507 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go @@ -0,0 +1,68 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package balancer + +import ( + "fmt" + "net/url" + "sort" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +func scToString(sc balancer.SubConn) string { + return fmt.Sprintf("%p", sc) +} + +func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) { + ss = make([]string, 0, len(scs)) + for sc, a := range scs { + ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc))) + } + sort.Strings(ss) + return ss +} + +func addrsToStrings(addrs []resolver.Address) (ss []string) { + ss = make([]string, len(addrs)) + for i := range addrs { + ss[i] = addrs[i].Addr + } + sort.Strings(ss) + return ss +} + +func epsToAddrs(eps ...string) (addrs []resolver.Address) { + addrs = make([]resolver.Address, 0, len(eps)) + for _, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend}) + continue + } + addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend}) + } + return addrs +} + +var genN = new(uint32) + +func genName() string { + now := time.Now().UnixNano() + return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1)) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go new file mode 100644 index 000000000..a35ec679a --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/client.go @@ -0,0 +1,664 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
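The endpoint package's exported helpers above are plain string handling, easiest to see with a few made-up values: Target builds the "endpoint://<id>/<endpoint>" form, ParseTarget splits it back apart, and ParseEndpoint maps URL schemes onto the network/address pair used for dialing.

package main

import (
	"fmt"

	"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Hypothetical resolver-group id and endpoint.
	target := endpoint.Target("client-1234", "10.0.0.1:2379")
	fmt.Println(target, endpoint.IsTarget(target)) // endpoint://client-1234/10.0.0.1:2379 true

	id, ep, err := endpoint.ParseTarget(target)
	fmt.Println(id, ep, err) // client-1234 10.0.0.1:2379 <nil>

	proto, host, scheme := endpoint.ParseEndpoint("https://etcd.example.com:2379")
	fmt.Println(proto, host, scheme) // tcp etcd.example.com:2379 https

	proto, host, scheme = endpoint.ParseEndpoint("unix:///var/run/etcd.sock")
	fmt.Println(proto, host, scheme) // unix /var/run/etcd.sock unix
}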
+ +package clientv3 + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "go.etcd.io/etcd/clientv3/balancer" + "go.etcd.io/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/clientv3/credentials" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/logutil" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpccredentials "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") + + roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String()) +) + +func init() { + lg := zap.NewNop() + if os.Getenv("ETCD_CLIENT_DEBUG") != "" { + lcfg := logutil.DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + + var err error + lg, err = lcfg.Build() // info level logging + if err != nil { + panic(err) + } + } + + // TODO: support custom balancer + balancer.RegisterBuilder(balancer.Config{ + Policy: picker.RoundrobinBalanced, + Name: roundRobinBalancerName, + Logger: lg, + }) +} + +// Client provides and manages an etcd v3 client session. +type Client struct { + Cluster + KV + Lease + Watcher + Auth + Maintenance + + conn *grpc.ClientConn + + cfg Config + creds grpccredentials.TransportCredentials + resolverGroup *endpoint.ResolverGroup + mu *sync.RWMutex + + ctx context.Context + cancel context.CancelFunc + + // Username is a user name for authentication. + Username string + // Password is a password for authentication. + Password string + authTokenBundle credentials.Bundle + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// New creates a new etcdv3 client from a given configuration. +func New(cfg Config) (*Client, error) { + if len(cfg.Endpoints) == 0 { + return nil, ErrNoAvailableEndpoints + } + + return newClient(&cfg) +} + +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + +// NewFromURL creates a new etcdv3 client from a URL. +func NewFromURL(url string) (*Client, error) { + return New(Config{Endpoints: []string{url}}) +} + +// NewFromURLs creates a new etcdv3 client from URLs. +func NewFromURLs(urls []string) (*Client, error) { + return New(Config{Endpoints: urls}) +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } + if c.resolverGroup != nil { + c.resolverGroup.Close() + } + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. 
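New and Close above define the client lifecycle; a minimal construction sketch, with an illustrative endpoint and timeouts:

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// cli.Ctx() is canceled when the client is closed, so it is a safe parent
	// for requests that should not outlive the client.
	ctx, cancel := context.WithTimeout(cli.Ctx(), 2*time.Second)
	defer cancel()
	_ = ctx // issue KV/Cluster/Auth/... calls with ctx here
}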
+func (c *Client) Endpoints() []string { + // copy the slice; protect original endpoints from being changed + c.mu.RLock() + defer c.mu.RUnlock() + eps := make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return eps +} + +// SetEndpoints updates client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg.Endpoints = eps + c.resolverGroup.SetEndpoints(eps) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err) + } + } + } +} + +func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": + case "http": + creds = nil + case "https", "unixs": + if creds != nil { + break + } + creds = credentials.NewBundle(credentials.Config{}).TransportCredentials() + default: + creds = nil + } + return creds +} + +// dialSetupOpts gives the dial opts prior to any authentication. +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + PermitWithoutStream: c.cfg.PermitWithoutStream, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + dialer := endpoint.Dialer + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(creds)) + // gRPC load balancer workaround. See credentials.transportCredential for details. + if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok { + dialer = credsDialer.Dialer + } + } else { + opts = append(opts, grpc.WithInsecure()) + } + opts = append(opts, grpc.WithContextDialer(dialer)) + + // Interceptor retry and backoff. + // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy + // once it is available. + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + opts = append(opts, + // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. + // Streams that are safe to retry are enabled individually. + grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)), + ) + + return opts, nil +} + +// Dial connects to a single endpoint using the client's config. +func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { + creds, err := c.directDialCreds(ep) + if err != nil { + return nil, err + } + // Use the grpc passthrough resolver to directly dial a single endpoint. 
+ // This resolver passes through the 'unix' and 'unixs' endpoints schemes used + // by etcd without modification, allowing us to directly dial endpoints and + // using the same dial functions that we use for load balancer dialing. + return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds) +} + +func (c *Client) getToken(ctx context.Context) error { + var err error // return last error in a case of fail + var auth *authenticator + + eps := c.Endpoints() + for _, ep := range eps { + // use dial options without dopts to avoid reusing the client balancer + var dOpts []grpc.DialOption + _, host, _ := endpoint.ParseEndpoint(ep) + target := c.resolverGroup.Target(host) + creds := c.dialWithBalancerCreds(ep) + dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...) + if err != nil { + err = fmt.Errorf("failed to configure auth dialer: %v", err) + continue + } + dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName)) + auth, err = newAuthenticator(ctx, target, dOpts, c) + if err != nil { + continue + } + defer auth.close() + + var resp *AuthenticateResponse + resp, err = auth.authenticate(ctx, c.Username, c.Password) + if err != nil { + // return err without retrying other endpoints + if err == rpctypes.ErrAuthNotEnabled { + return err + } + continue + } + + c.authTokenBundle.UpdateAuthToken(resp.Token) + return nil + } + + return err +} + +// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host +// of the provided endpoint determines the scheme used for all endpoints of the client connection. +func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + _, host, _ := endpoint.ParseEndpoint(ep) + target := c.resolverGroup.Target(host) + creds := c.dialWithBalancerCreds(ep) + return c.dial(target, creds, dopts...) +} + +// dial configures and dials any grpc balancer target. +func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts, err := c.dialSetupOpts(creds, dopts...) + if err != nil { + return nil, fmt.Errorf("failed to configure dialer: %v", err) + } + + if c.Username != "" && c.Password != "" { + c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + + ctx, cancel := c.ctx, func() {} + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + + err = c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + cancel() + return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) + } + cancel() + } + + opts = append(opts, c.cfg.DialOptions...) + + dctx := c.ctx + if c.cfg.DialTimeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? + } + + conn, err := grpc.DialContext(dctx, target, opts...) 
+ if err != nil { + return nil, err + } + return conn, nil +} + +func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) { + _, host, scheme := endpoint.ParseEndpoint(ep) + creds := c.creds + if len(scheme) != 0 { + creds = c.processCreds(scheme) + if creds != nil { + clone := creds.Clone() + // Set the server name must to the endpoint hostname without port since grpc + // otherwise attempts to check if x509 cert is valid for the full endpoint + // including the scheme and port, which fails. + overrideServerName, _, err := net.SplitHostPort(host) + if err != nil { + // Either the host didn't have a port or the host could not be parsed. Either way, continue with the + // original host string. + overrideServerName = host + } + clone.OverrideServerName(overrideServerName) + creds = clone + } + } + return creds, nil +} + +func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials { + _, _, scheme := endpoint.ParseEndpoint(ep) + creds := c.creds + if len(scheme) != 0 { + creds = c.processCreds(scheme) + } + return creds +} + +func newClient(cfg *Config) (*Client, error) { + if cfg == nil { + cfg = &Config{} + } + var creds grpccredentials.TransportCredentials + if cfg.TLS != nil { + creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + } + + // use a temporary skeleton client to bootstrap first connection + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) + client := &Client{ + conn: nil, + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.RWMutex), + callOpts: defaultCallOpts, + } + + lcfg := logutil.DefaultZapLoggerConfig + if cfg.LogConfig != nil { + lcfg = *cfg.LogConfig + } + var err error + client.lg, err = lcfg.Build() + if err != nil { + return nil, err + } + + if cfg.Username != "" && cfg.Password != "" { + client.Username = cfg.Username + client.Password = cfg.Password + } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } + + // Prepare a 'endpoint:///' resolver for the client and create a endpoint target to pass + // to dial so the client knows to use this resolver. + client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String())) + if err != nil { + client.cancel() + return nil, err + } + client.resolverGroup.SetEndpoints(cfg.Endpoints) + + if len(cfg.Endpoints) < 1 { + return nil, fmt.Errorf("at least one Endpoint must is required in client config") + } + dialEndpoint := cfg.Endpoints[0] + + // Use a provided endpoint target so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. 
+ conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName)) + if err != nil { + client.cancel() + client.resolverGroup.Close() + return nil, err + } + // TODO: With the old grpc balancer interface, we waited until the dial timeout + // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface? + client.conn = conn + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} + +// roundRobinQuorumBackoff retries against quorum between each backoff. +// This is intended for use with a round robin load balancer. +func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + // after each round robin across quorum, backoff for our wait between duration + n := uint(len(c.Endpoints())) + quorum := (n/2 + 1) + if attempt%quorum == 0 { + c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) + return jitterUp(waitBetween, jitterFraction) + } + c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) + return 0 + } +} + +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + + eps := c.Endpoints() + errc := make(chan error, len(eps)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + cancel() + ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + } + + wg.Add(len(eps)) + for _, ep := range eps { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + var serr error + if maj, serr = strconv.Atoi(vs[0]); serr != nil { + errc <- serr + return + } + if min, serr = strconv.Atoi(vs[1]); serr != nil { + errc <- serr + return + } + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for range eps { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + +// ActiveConnection returns the current in-use connection +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr returns true if the given error and context indicate no forward +// progress can be made, even after reconnecting. +func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? 
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +// isUnavailableErr returns true if the given error is an unavailable error +func isUnavailableErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return false + } + if err == nil { + return false + } + ev, ok := status.FromError(err) + if ok { + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + return ev.Code() == codes.Unavailable + } + return false +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + if ev, ok := status.FromError(err); ok { + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + } + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} + +// IsConnCanceled returns true, if error is from a closed gRPC connection. +// ref. https://github.com/grpc/grpc-go/pull/1854 +func IsConnCanceled(err error) bool { + if err == nil { + return false + } + + // >= gRPC v1.23.x + s, ok := status.FromError(err) + if ok { + // connection is canceled or server has already closed the connection + return s.Code() == codes.Canceled || s.Message() == "transport is closing" + } + + // >= gRPC v1.10.x + if err == context.Canceled { + return true + } + + // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' + return strings.Contains(err.Error(), "grpc: the client connection is closing") +} + +// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details. +type TransportCredentialsWithDialer interface { + grpccredentials.TransportCredentials + Dialer(ctx context.Context, dialEp string) (net.Conn, error) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go new file mode 100644 index 000000000..ce97e5c85 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" + + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. 
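IsConnCanceled above is what callers typically check when a request races with Client.Close; a short sketch, assuming the KV Get method that clientv3 defines elsewhere in this package:

package etcdexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// getUnlessClosed is a hypothetical helper: it treats an error caused by a
// closed client connection as "stop quietly" rather than as a failure.
func getUnlessClosed(ctx context.Context, cli *clientv3.Client, key string) (found bool, err error) {
	resp, err := cli.Get(ctx, key)
	if err != nil {
		if clientv3.IsConnCanceled(err) {
			// The gRPC connection was closed, e.g. cli.Close() was called.
			return false, nil
		}
		return false, err
	}
	return resp.Count > 0, nil
}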
+ MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go new file mode 100644 index 000000000..5779713d3 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/compact_op.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" +) + +// CompactOp represents a compact operation. +type CompactOp struct { + revision int64 + physical bool +} + +// CompactOption configures compact operation. +type CompactOption func(*CompactOp) + +func (op *CompactOp) applyCompactOpts(opts []CompactOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpCompact wraps slice CompactOption to create a CompactOp. +func OpCompact(rev int64, opts ...CompactOption) CompactOp { + ret := CompactOp{revision: rev} + ret.applyCompactOpts(opts) + return ret +} + +func (op CompactOp) toRequest() *pb.CompactionRequest { + return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} +} + +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. +func WithCompactPhysical() CompactOption { + return func(op *CompactOp) { op.physical = true } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go new file mode 100644 index 000000000..01ed68e94 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/compare.go @@ -0,0 +1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
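A short usage sketch of the Cluster API above: list the current members, then add a non-voting learner. The peer URL is illustrative.

package etcdexample

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

// addLearner is a hypothetical helper that prints the current membership and
// then adds a learner member with the given peer URL.
func addLearner(ctx context.Context, cli *clientv3.Client, peerURL string) error {
	lresp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range lresp.Members {
		fmt.Printf("member %x: %v\n", m.ID, m.PeerURLs)
	}

	aresp, err := cli.MemberAddAsLearner(ctx, []string{peerURL})
	if err != nil {
		return err
	}
	fmt.Printf("added learner %x\n", aresp.Member.ID)
	return nil
}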
+ +package clientv3 + +import ( + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" +) + +type CompareTarget int +type CompareResult int + +const ( + CompareVersion CompareTarget = iota + CompareCreated + CompareModified + CompareValue +) + +type Cmp pb.Compare + +func Compare(cmp Cmp, result string, v interface{}) Cmp { + var r pb.Compare_CompareResult + + switch result { + case "=": + r = pb.Compare_EQUAL + case "!=": + r = pb.Compare_NOT_EQUAL + case ">": + r = pb.Compare_GREATER + case "<": + r = pb.Compare_LESS + default: + panic("Unknown result op") + } + + cmp.Result = r + switch cmp.Target { + case pb.Compare_VALUE: + val, ok := v.(string) + if !ok { + panic("bad compare value") + } + cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} + case pb.Compare_VERSION: + cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} + case pb.Compare_CREATE: + cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} + case pb.Compare_MOD: + cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} + case pb.Compare_LEASE: + cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} + default: + panic("Unknown compare type") + } + return cmp +} + +func Value(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} +} + +func Version(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} +} + +func CreateRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} +} + +func ModRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_MOD} +} + +// LeaseValue compares a key's LeaseID to a value of your choosing. The empty +// LeaseID is 0, otherwise known as `NoLease`. +func LeaseValue(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} +} + +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// WithRange sets the comparison to scan the range [key, end). +func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = []byte(end) + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. +func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. +func mustInt64(val interface{}) int64 { + if v, ok := val.(int64); ok { + return v + } + if v, ok := val.(int); ok { + return int64(v) + } + panic("bad value") +} + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. 
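// Illustrative usage sketch, not part of the vendored upstream file: the Cmp
// builders above used inside a transaction to create a key only if it does not
// exist yet. The function name and key/value are placeholders; assumes imports
// "context", "go.etcd.io/etcd/clientv3" and an existing *clientv3.Client.
func createKeyIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	// CreateRevision == 0 means the key has never been created, so the Put
	// below only runs when "key" is absent.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}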
+func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go new file mode 100644 index 000000000..11d447d57 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/config.go @@ -0,0 +1,88 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + // For example, pass "grpc.WithBlock()" to block until the underlying connection is up. + // Without this, Dial returns immediately and connecting the server happens in background. + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + + // LogConfig configures client-side logger. 
+ // If nil, use the default logger. + // TODO: configure gRPC logger + LogConfig *zap.Config + + // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). + PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker +} diff --git a/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go new file mode 100644 index 000000000..63389c08b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go @@ -0,0 +1,173 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials implements gRPC credential interface with etcd specific logic. +// e.g., client handshake with custom authority parameter +package credentials + +import ( + "context" + "crypto/tls" + "net" + "sync" + + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + grpccredentials "google.golang.org/grpc/credentials" +) + +// Config defines gRPC credential configuration. +type Config struct { + TLSConfig *tls.Config +} + +// Bundle defines gRPC credential interface. +type Bundle interface { + grpccredentials.Bundle + UpdateAuthToken(token string) +} + +// NewBundle constructs a new gRPC credential bundle. +func NewBundle(cfg Config) Bundle { + return &bundle{ + tc: newTransportCredential(cfg.TLSConfig), + rc: newPerRPCCredential(), + } +} + +// bundle implements "grpccredentials.Bundle" interface. +type bundle struct { + tc *transportCredential + rc *perRPCCredential +} + +func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials { + return b.tc +} + +func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { + return b.rc +} + +func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { + // no-op + return nil, nil +} + +// transportCredential implements "grpccredentials.TransportCredentials" interface. +// transportCredential wraps TransportCredentials to track which +// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the +// hostname or IP of the dialed endpoint. +// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when +// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name +// in their TLS certs. +// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer) +// when dialing. 
+type transportCredential struct { + gtc grpccredentials.TransportCredentials + mu sync.Mutex + // addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the + // endpoint provided to the dialer when dialing + addrToEndpoint map[string]string +} + +func newTransportCredential(cfg *tls.Config) *transportCredential { + return &transportCredential{ + gtc: grpccredentials.NewTLS(cfg), + addrToEndpoint: map[string]string{}, + } +} + +func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + // Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint + tc.mu.Lock() + dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()] + tc.mu.Unlock() + if ok { + _, host, _ := endpoint.ParseEndpoint(dialEp) + authority = host + } + return tc.gtc.ClientHandshake(ctx, authority, rawConn) +} + +// return true if given string is an IP. +func isIP(ep string) bool { + return net.ParseIP(ep) != nil +} + +func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + return tc.gtc.ServerHandshake(rawConn) +} + +func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { + return tc.gtc.Info() +} + +func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { + copy := map[string]string{} + tc.mu.Lock() + for k, v := range tc.addrToEndpoint { + copy[k] = v + } + tc.mu.Unlock() + return &transportCredential{ + gtc: tc.gtc.Clone(), + addrToEndpoint: copy, + } +} + +func (tc *transportCredential) OverrideServerName(serverNameOverride string) error { + return tc.gtc.OverrideServerName(serverNameOverride) +} + +func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) { + // Keep track of which addresses are dialed for which endpoints + conn, err := endpoint.Dialer(ctx, dialEp) + if conn != nil { + tc.mu.Lock() + tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp + tc.mu.Unlock() + } + return conn, err +} + +// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. +type perRPCCredential struct { + authToken string + authTokenMu sync.RWMutex +} + +func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} } + +func (rc *perRPCCredential) RequireTransportSecurity() bool { return false } + +func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + rc.authTokenMu.RLock() + authToken := rc.authToken + rc.authTokenMu.RUnlock() + return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil +} + +func (b *bundle) UpdateAuthToken(token string) { + if b.rc == nil { + return + } + b.rc.UpdateAuthToken(token) +} + +func (rc *perRPCCredential) UpdateAuthToken(token string) { + rc.authTokenMu.Lock() + rc.authToken = token + rc.authTokenMu.Unlock() +} diff --git a/vendor/go.etcd.io/etcd/clientv3/ctx.go b/vendor/go.etcd.io/etcd/clientv3/ctx.go new file mode 100644 index 000000000..542219837 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/ctx.go @@ -0,0 +1,64 @@ +// Copyright 2020 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
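// Illustrative usage sketch, not part of the vendored upstream file: turning
// the credentials.Bundle above into gRPC dial options and refreshing its auth
// token. This is a simplified view of what the clientv3 dialer does internally,
// shown only to illustrate the Bundle interface; the function name is a
// placeholder. Assumes imports "crypto/tls", "google.golang.org/grpc" and
// "go.etcd.io/etcd/clientv3/credentials".
func bundleDialOptions(tlsCfg *tls.Config, authToken string) ([]grpc.DialOption, credentials.Bundle) {
	b := credentials.NewBundle(credentials.Config{TLSConfig: tlsCfg})
	// The per-RPC credential attaches the token to every request's metadata;
	// it can be swapped later (e.g. after re-authentication) via UpdateAuthToken.
	b.UpdateAuthToken(authToken)
	return []grpc.DialOption{
		grpc.WithTransportCredentials(b.TransportCredentials()),
		grpc.WithPerRPCCredentials(b.PerRPCCredentials()),
	}, b
}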
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "strings" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/version" + "google.golang.org/grpc/metadata" +) + +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader. +func WithRequireLeader(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add 'hasleader' key/value + metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, copied) +} + +// embeds client version +func withVersion(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add version key/value + metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, copied) +} + +func metadataGet(md metadata.MD, k string) []string { + k = strings.ToLower(k) + return md[k] +} + +func metadataSet(md metadata.MD, k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go new file mode 100644 index 000000000..913cd2825 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/doc.go @@ -0,0 +1,106 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package clientv3 implements the official Go etcd client for v3. 
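// Illustrative usage sketch, not part of the vendored upstream files: building
// a client from the Config struct in config.go and issuing a read with the
// WithRequireLeader helper from ctx.go above. The function name, endpoints and
// timeouts are placeholders; assumes imports "context", "time" and
// "go.etcd.io/etcd/clientv3".
func getWithLeader(key string) (*clientv3.GetResponse, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379", "localhost:22379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return nil, err
	}
	defer cli.Close()

	// WithRequireLeader makes the request fail fast when the contacted member
	// has no leader, instead of waiting until one is elected.
	ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(context.Background()), 2*time.Second)
	defer cancel()
	return cli.Get(ctx, key)
}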
+// +// Create client using `clientv3.New`: +// +// // expect dial time-out on ipv4 blackhole +// _, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"http://254.0.0.1:12345"}, +// DialTimeout: 2 * time.Second, +// }) +// +// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 +// if err == context.DeadlineExceeded { +// // handle errors +// } +// +// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 +// if err == grpc.ErrClientConnTimeout { +// // handle errors +// } +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 3 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. +// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// +// Here is the example code to handle client errors: +// +// resp, err := kvc.Put(ctx, "", "") +// if err != nil { +// if err == context.Canceled { +// // ctx is canceled by another routine +// } else if err == context.DeadlineExceeded { +// // ctx is attached with a deadline and it exceeded +// } else if err == rpctypes.ErrEmptyKey { +// // client-side error: key is not provided +// } else if ev, ok := status.FromError(err); ok { +// code := ev.Code() +// if code == codes.DeadlineExceeded { +// // server-side context might have timed-out first (due to clock skew) +// // while original client-side context is not timed-out yet +// } +// } else { +// // bad cluster endpoints, which are not etcd servers +// } +// } +// +// go func() { cli.Close() }() +// _, err := kvc.Get(ctx, "a") +// if err != nil { +// // with etcd clientv3 <= v3.3 +// if err == context.Canceled { +// // grpc balancer calls 'Get' with an inflight client.Close +// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x +// // grpc balancer calls 'Get' after client.Close. +// } +// // with etcd clientv3 >= v3.4 +// if clientv3.IsConnCanceled(err) { +// // gRPC client connection is closed +// } +// } +// +// The grpc load balancer is registered statically and is shared across etcd clients. +// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment +// variable. E.g. "ETCD_CLIENT_DEBUG=1". +// +package clientv3 diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go new file mode 100644 index 000000000..2b7864ad8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/kv.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
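// Illustrative usage sketch, not part of the vendored upstream file: a simple
// round trip through the KV API defined in kv.go below (Put, Get, Compact,
// Delete), using WithCompactPhysical from compact_op.go earlier in this patch.
// The function name and keys are placeholders; assumes imports "context",
// "go.etcd.io/etcd/clientv3" and an existing *clientv3.Client.
func kvRoundTrip(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		return err
	}
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		return err
	}
	// Compact history up to the revision observed by the Get; WithCompactPhysical
	// would additionally wait until the compacted entries are removed from storage.
	if _, err := cli.Compact(ctx, resp.Header.Revision, clientv3.WithCompactPhysical()); err != nil {
		return err
	}
	_, err = cli.Delete(ctx, "sample_key")
	return err
}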
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + // Put puts a key-value pair into etcd. + // Note that key,value can be plain bytes array and string is + // an immutable representation of that bytes array. + // To get a string of bytes, do string([]byte{0x10, 0x20}). + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + + // Get retrieves keys. + // By default, Get will return the value for "key", if any. + // When passed WithRange(end), Get will return the keys in the range [key, end). + // When passed WithFromKey(), Get returns keys greater than or equal to key. + // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; + // if the required revision is compacted, the request will fail with ErrCompacted . + // When passed WithLimit(limit), the number of returned keys is bounded by limit. + // When passed WithSort(), the keys will be sorted. + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + + // Delete deletes a key, or optionally using WithRange(end), [key, end). + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + + // Compact compacts etcd KV history before the given rev. + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + + // Do applies a single Op on KV without a transaction. + // Do is useful when creating arbitrary operations to be issued at a + // later time; the user can range over the operations, calling Do to + // execute them. Get/Put/Delete, on the other hand, are best suited + // for when the operation should be issued at the time of declaration. + Do(ctx context.Context, op Op) (OpResponse, error) + + // Txn creates a transaction. 
+ Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go new file mode 100644 index 000000000..c2796fc96 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/lease.go @@ -0,0 +1,596 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// LeaseResponseChSize is the size of buffer to store unsent lease responses. +// WARNING: DO NOT UPDATE. +// Only for testing purposes. +var LeaseResponseChSize = 16 + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. 
+ Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // Leases retrieves all leases. + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. + // + // The returned "LeaseKeepAliveResponse" channel closes if underlying keep + // alive stream is interrupted in some way the client cannot handle itself; + // given context "ctx" is canceled or timed out. + // + // TODO(v4.0): post errors to last keep alive message before closing + // (see https://github.com/etcd-io/etcd/pull/7866) + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. The response corresponds to the + // first message from calling KeepAlive. If the response has a recoverable + // error, KeepAliveOnce will retry the RPC with a new keep alive message. + // + // In most of the cases, Keepalive should be used instead of KeepAliveOnce. + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. 
+ donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
+ if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} + } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { + ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) + + l.mu.Lock() + // ensure that recvKeepAliveLoop is still running + select { + case <-l.donec: + err := l.loopErr + l.mu.Unlock() + close(ch) + return ch, ErrKeepAliveHalted{Reason: err} + default: + } + ka, ok := l.keepAlives[id] + if !ok { + // create fresh keep alive + ka = &keepAlive{ + chs: []chan<- *LeaseKeepAliveResponse{ch}, + ctxs: []context.Context{ctx}, + deadline: time.Now().Add(l.firstKeepAliveTimeout), + nextKeepAlive: time.Now(), + donec: make(chan struct{}), + } + l.keepAlives[id] = ka + } else { + // add channel and context to existing keep alive + ka.ctxs = append(ka.ctxs, ctx) + ka.chs = append(ka.chs, ch) + } + l.mu.Unlock() + + go l.keepAliveCtxCloser(ctx, id, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) + + return ch, nil +} + +func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + for { + resp, err := l.keepAliveOnce(ctx, id) + if err == nil { + if resp.TTL <= 0 { + err = rpctypes.ErrLeaseNotFound + } + return resp, err + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } + } +} + +func (l *lessor) Close() error { + l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) + <-l.donec + return nil +} + +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { + select { + case <-donec: + return + case <-l.donec: + return + case <-ctx.Done(): + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[id] + if !ok { + return + } + + // close channel and remove context if still associated with keep alive + for i, c := range ka.ctxs { + if c == ctx { + close(ka.chs[i]) + ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) + ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) + break + } + } + // remove if no one more listeners + if len(ka.chs) == 0 { + delete(l.keepAlives, id) + } +} + +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
+func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) + if err != nil { + return nil, toErr(ctx, err) + } + + resp, rerr := stream.Recv() + if rerr != nil { + return nil, toErr(ctx, rerr) + } + + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + return karesp, nil +} + +func (l *lessor) recvKeepAliveLoop() (gerr error) { + defer func() { + l.mu.Lock() + close(l.donec) + l.loopErr = gerr + for _, ka := range l.keepAlives { + ka.close() + } + l.keepAlives = make(map[LeaseID]*keepAlive) + l.mu.Unlock() + }() + + for { + stream, err := l.resetRecv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + case <-l.stopCtx.Done(): + return l.stopCtx.Err() + } + } +} + +// resetRecv opens a new lease stream and starts sending keep alive requests. +func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { + sctx, cancel := context.WithCancel(l.stopCtx) + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
+ if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } + } + // still advance in order to rate-limit keep-alive sends + ka.nextKeepAlive = nextKeepAlive + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(retryConnWait): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go new file mode 100644 index 000000000..f5ae0109d --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/logger.go @@ -0,0 +1,101 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
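// Illustrative usage sketch, not part of the vendored upstream files: the Lease
// API from lease.go above, granting a lease, attaching a key to it, and keeping
// it alive. The function name and keys are placeholders; assumes imports
// "context", "go.etcd.io/etcd/clientv3" and an existing *clientv3.Client.
func putWithLease(ctx context.Context, cli *clientv3.Client) error {
	// Grant a 10-second lease and attach a key to it.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "service/instance-1", "addr", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}

	// KeepAlive renews the lease until ctx is canceled. Responses must be
	// drained; once the buffered channel fills up, further responses are
	// dropped (the client keeps sending keepalive requests regardless).
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
			// consume keepalive responses; the channel closes on lease
			// expiry, revocation, or context cancellation
		}
	}()
	return nil
}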
+ +package clientv3 + +import ( + "io/ioutil" + "sync" + + "go.etcd.io/etcd/pkg/logutil" + + "google.golang.org/grpc/grpclog" +) + +var ( + lgMu sync.RWMutex + lg logutil.Logger +) + +type settableLogger struct { + l grpclog.LoggerV2 + mu sync.RWMutex +} + +func init() { + // disable client side logs by default + lg = &settableLogger{} + SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) +} + +// SetLogger sets client-side Logger. +func SetLogger(l grpclog.LoggerV2) { + lgMu.Lock() + lg = logutil.NewLogger(l) + // override grpclog so that any changes happen with locking + grpclog.SetLoggerV2(lg) + lgMu.Unlock() +} + +// GetLogger returns the current logutil.Logger. +func GetLogger() logutil.Logger { + lgMu.RLock() + l := lg + lgMu.RUnlock() + return l +} + +// NewLogger returns a new Logger with logutil.Logger. +func NewLogger(gl grpclog.LoggerV2) logutil.Logger { + return &settableLogger{l: gl} +} + +func (s *settableLogger) get() grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + return l +} + +// implement the grpclog.LoggerV2 interface + +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } +func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + if l.V(lvl) { + return s + } + return logutil.NewDiscardLogger() +} diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go new file mode 100644 index 000000000..809b8a3b4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go @@ -0,0 +1,243 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
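// Illustrative usage sketch, not part of the vendored upstream file: switching
// the client-side logger set up in logger.go above. The function name is a
// placeholder; assumes imports "io/ioutil", "os",
// "google.golang.org/grpc/grpclog" and "go.etcd.io/etcd/clientv3".
func enableClientLogs(verbose bool) {
	if !verbose {
		// Client-side logs are discarded by default (see the init above);
		// this restores that behaviour explicitly.
		clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
		return
	}
	// Route info/warning/error output to stderr for debugging.
	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}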
+ +package clientv3 + +import ( + "context" + "fmt" + "io" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.uber.org/zap" + + "google.golang.org/grpc" +) + +type ( + DefragmentResponse pb.DefragmentResponse + AlarmResponse pb.AlarmResponse + AlarmMember pb.AlarmMember + StatusResponse pb.StatusResponse + HashKVResponse pb.HashKVResponse + MoveLeaderResponse pb.MoveLeaderResponse +) + +type Maintenance interface { + // AlarmList gets all active alarms. + AlarmList(ctx context.Context) (*AlarmResponse, error) + + // AlarmDisarm disarms a given alarm. + AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) + + // Defragment releases wasted space from internal fragmentation on a given etcd member. + // Defragment is only needed when deleting a large number of keys and want to reclaim + // the resources. + // Defragment is an expensive operation. User should avoid defragmenting multiple members + // at the same time. + // To defragment multiple members in the cluster, user need to call defragment multiple + // times with different endpoints. + Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) + + // Status gets the status of the endpoint. + Status(ctx context.Context, endpoint string) (*StatusResponse, error) + + // HashKV returns a hash of the KV state at the time of the RPC. + // If revision is zero, the hash is computed on all keys. If the revision + // is non-zero, the hash is computed on all keys at or below the given revision. + HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) + + // Snapshot provides a reader for a point-in-time snapshot of etcd. + // If the context "ctx" is canceled or timed out, reading from returned + // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + Snapshot(ctx context.Context) (io.ReadCloser, error) + + // MoveLeader requests current leader to transfer its leadership to the transferee. + // Request must be made to the leader. + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) +} + +type maintenance struct { + lg *zap.Logger + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption +} + +func NewMaintenance(c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.Dial(endpoint) + if err != nil { + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_GET, + MemberID: 0, // all + Alarm: pb.AlarmType_NONE, // all + } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
+ if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_DEACTIVATE, + MemberID: am.MemberID, + Alarm: am.Alarm, + } + + if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { + ar, err := m.AlarmList(ctx) + if err != nil { + return nil, toErr(ctx, err) + } + ret := AlarmResponse{} + for _, am := range ar.Alarms { + dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) + if derr != nil { + return nil, toErr(ctx, derr) + } + ret.Alarms = append(ret.Alarms, dresp.Alarms...) + } + return &ret, nil + } + + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*HashKVResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, toErr(ctx, err) + } + + m.lg.Info("opened snapshot stream; downloading") + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + switch err { + case io.EOF: + m.lg.Info("completed snapshot read; closing") + default: + m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) + } + pw.CloseWithError(err) + return + } + + // can "resp == nil && err == nil" + // before we receive snapshot SHA digest? + // No, server sends EOF with an empty response + // after it sends SHA digest at the end + + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + }() + return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil +} + +type snapshotReadCloser struct { + ctx context.Context + io.ReadCloser +} + +func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { + n, err = rc.ReadCloser.Read(p) + return n, toErr(rc.ctx, err) +} + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) 
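// Illustrative usage sketch, not part of the vendored upstream file: the
// Maintenance API above, checking and defragmenting members one endpoint at a
// time (as the Defragment doc advises) and streaming a snapshot to disk. The
// function name is a placeholder; assumes imports "context", "io", "os",
// "go.etcd.io/etcd/clientv3" and an existing *clientv3.Client.
func maintainCluster(ctx context.Context, cli *clientv3.Client, snapshotPath string) error {
	for _, ep := range cli.Endpoints() {
		if _, err := cli.Status(ctx, ep); err != nil {
			return err
		}
		if _, err := cli.Defragment(ctx, ep); err != nil {
			return err
		}
	}

	// Stream a point-in-time snapshot to a local file.
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()
	f, err := os.Create(snapshotPath)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, rc)
	return err
}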
+ return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go new file mode 100644 index 000000000..81ae31fd8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/op.go @@ -0,0 +1,560 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + +type opType int + +const ( + // A default Op has opType 0, which is invalid. + tRange opType = iota + 1 + tPut + tDeleteRange + tTxn +) + +var noPrefixEnd = []byte{0} + +// Op represents an Operation that kv can execute. +type Op struct { + t opType + key []byte + end []byte + + // for range + limit int64 + sort *SortOption + serializable bool + keysOnly bool + countOnly bool + minModRev int64 + maxModRev int64 + minCreateRev int64 + maxCreateRev int64 + + // for range, watch + rev int64 + + // for watch, put, delete + prevKV bool + + // for watch + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // for put + ignoreValue bool + ignoreLease bool + + // progressNotify is for progress updates. + progressNotify bool + // createdNotify is for created event + createdNotify bool + // filters for watchers + filterPut bool + filterDelete bool + + // for put + val []byte + leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op +} + +// accessors / mutators + +// IsTxn returns true if the "Op" type is transaction. +func (op Op) IsTxn() bool { + return op.t == tTxn +} + +// Txn returns the comparison(if) operations, "then" operations, and "else" operations. +func (op Op) Txn() ([]Cmp, []Op, []Op) { + return op.cmps, op.thenOps, op.elseOps +} + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding with the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable } + +// IsKeysOnly returns whether keysOnly is set. +func (op Op) IsKeysOnly() bool { return op.keysOnly } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly } + +// MinModRev returns the operation's minimum modify revision. 
+func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +// OpGet returns "get" operation based on given key and operation options. +func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +// OpDelete returns "delete" operation based on given key and operation options. 
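// Illustrative usage sketch, not part of the vendored upstream file: composing
// Ops ahead of time and issuing them through KV.Do, including a nested OpTxn
// (handled by the tTxn case in kv.go above). The function name and keys are
// placeholders; assumes imports "context", "go.etcd.io/etcd/clientv3" and an
// existing *clientv3.Client.
func doOps(ctx context.Context, cli *clientv3.Client) error {
	ops := []clientv3.Op{
		clientv3.OpPut("cfg/a", "1"),
		clientv3.OpGet("cfg/", clientv3.WithPrefix()),
		// A transaction with no compares always runs its "then" branch.
		clientv3.OpTxn(nil,
			[]clientv3.Op{clientv3.OpDelete("cfg/", clientv3.WithPrefix())},
			nil),
	}
	for _, op := range ops {
		if _, err := cli.Do(ctx, op); err != nil {
			return err
		}
	}
	return nil
}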
+func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +// OpPut returns "put" operation based on given key-value and operation options. +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +// OpTxn returns "txn" operation based on given transaction conditions. +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. 
+// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. +func WithPrefix() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } + op.end = getPrefix(op.key) + } +} + +// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. +// For example, 'Get' requests with 'WithRange(end)' returns +// the keys in the range [key, end). +// endKey must be lexicographically greater than start key. +func WithRange(endKey string) OpOption { + return func(op *Op) { op.end = []byte(endKey) } +} + +// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests +// to be equal or greater than the key in the argument. +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + } +} + +// WithSerializable makes 'Get' request serializable. By default, +// it's linearizable. Serializable requests are better for lower latency +// requirement. +func WithSerializable() OpOption { + return func(op *Op) { op.serializable = true } +} + +// WithKeysOnly makes the 'Get' request return only the keys and the corresponding +// values will be omitted. +func WithKeysOnly() OpOption { + return func(op *Op) { op.keysOnly = true } +} + +// WithCountOnly makes the 'Get' request return only the count of keys. +func WithCountOnly() OpOption { + return func(op *Op) { op.countOnly = true } +} + +// WithMinModRev filters out keys for Get with modification revisions less than the given revision. +func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } + +// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. +func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } + +// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. 
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } + +// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. +func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } + +// WithFirstCreate gets the key with the oldest creation revision in the request range. +func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } + +// WithLastCreate gets the key with the latest creation revision in the request range. +func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } + +// WithFirstKey gets the lexically first key in the request range. +func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } + +// WithLastKey gets the lexically last key in the request range. +func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } + +// WithFirstRev gets the key with the oldest modification revision in the request range. +func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } + +// WithLastRev gets the key with the latest modification revision in the request range. +func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } + +// withTop gets the first key over the get's prefix given a sort order +func withTop(target SortTarget, order SortOrder) []OpOption { + return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} +} + +// WithProgressNotify makes watch server send periodic progress updates +// every 10 minutes when there is no incoming events. +// Progress updates have zero events in WatchResponse. +func WithProgressNotify() OpOption { + return func(op *Op) { + op.progressNotify = true + } +} + +// WithCreatedNotify makes watch server sends the created event. +func WithCreatedNotify() OpOption { + return func(op *Op) { + op.createdNotify = true + } +} + +// WithFilterPut discards PUT events from the watcher. +func WithFilterPut() OpOption { + return func(op *Op) { op.filterPut = true } +} + +// WithFilterDelete discards DELETE events from the watcher. +func WithFilterDelete() OpOption { + return func(op *Op) { op.filterDelete = true } +} + +// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, +// nothing will be returned. +func WithPrevKV() OpOption { + return func(op *Op) { + op.prevKV = true + } +} + +// WithFragment to receive raw watch response with fragmentation. +// Fragmentation is disabled by default. If fragmentation is enabled, +// etcd watch server will split watch response before sending to clients +// when the total size of watch events exceed server-side request limit. +// The default server-side request limit is 1.5 MiB, which can be configured +// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. +// See "etcdserver/api/v3rpc/watch.go" for more details. +func WithFragment() OpOption { + return func(op *Op) { op.fragment = true } +} + +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. 
+func WithIgnoreLease() OpOption { + return func(op *Op) { + op.ignoreLease = true + } +} + +// LeaseOp represents an Operation that lease can execute. +type LeaseOp struct { + id LeaseID + + // for TimeToLive + attachedKeys bool +} + +// LeaseOption configures lease operations. +type LeaseOption func(*LeaseOp) + +func (op *LeaseOp) applyOpts(opts []LeaseOption) { + for _, opt := range opts { + opt(op) + } +} + +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. +func WithAttachedKeys() LeaseOption { + return func(op *LeaseOp) { op.attachedKeys = true } +} + +func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { + ret := &LeaseOp{id: id} + ret.applyOpts(opts) + return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} +} + +// isWithPrefix returns true if WithPrefix is being called in the op +func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) } + +// isWithFromKey returns true if WithFromKey is being called in the op +func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) } diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go new file mode 100644 index 000000000..700714c08 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/options.go @@ -0,0 +1,65 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math" + "time" + + "google.golang.org/grpc" +) + +var ( + // client-side handling retrying of request failures where data was not written to the wire or + // where server indicates it did not process the data. gRPC default is default is "FailFast(true)" + // but for etcd we default to "FailFast(false)" to minimize client request error responses due to + // transient failures. + defaultFailFast = grpc.FailFast(false) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) + + // client-side non-streaming retry limit, only applied to requests where server responds with + // a error code clearly indicating it was unable to process the request such as codes.Unavailable. + // If set to 0, retry is disabled. 
+ defaultUnaryMaxRetries uint = 100 + + // client-side streaming retry limit, only applied to requests where server responds with + // a error code clearly indicating it was unable to process the request such as codes.Unavailable. + // If set to 0, retry is disabled. + defaultStreamMaxRetries = ^uint(0) // max uint + + // client-side retry backoff wait between requests. + defaultBackoffWaitBetween = 25 * time.Millisecond + + // client-side retry backoff default jitter fraction. + defaultBackoffJitterFraction = 0.10 +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} + +// MaxLeaseTTL is the maximum lease TTL value +const MaxLeaseTTL = 9000000000 diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go new file mode 100644 index 000000000..7e855de06 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/retry.go @@ -0,0 +1,298 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type retryPolicy uint8 + +const ( + repeatable retryPolicy = iota + nonRepeatable +) + +func (rp retryPolicy) String() string { + switch rp { + case repeatable: + return "repeatable" + case nonRepeatable: + return "nonRepeatable" + default: + return "UNKNOWN" + } +} + +// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry. +// +// immutable requests (e.g. Get) should be retried unless it's +// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge). +// +// Returning "false" means retry should stop, since client cannot +// handle itself even with retries. +func isSafeRetryImmutableRPC(err error) bool { + eErr := rpctypes.Error(err) + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + // interrupted by non-transient server-side or gRPC-side error + // client cannot handle itself (e.g. rpctypes.ErrCompacted) + return false + } + // only retry if unavailable + ev, ok := status.FromError(err) + if !ok { + // all errors from RPC is typed "grpc/status.(*statusError)" + // (ref. https://github.com/grpc/grpc-go/pull/1782) + // + // if the error type is not "grpc/status.(*statusError)", + // it could be from "Dial" + // TODO: do not retry for now + // ref. https://github.com/grpc/grpc-go/issues/1581 + return false + } + return ev.Code() == codes.Unavailable +} + +// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry. +// +// mutable requests (e.g. 
Put, Delete, Txn) should only be retried +// when the status code is codes.Unavailable when initial connection +// has not been established (no endpoint is up). +// +// Returning "false" means retry should stop, otherwise it violates +// write-at-most-once semantics. +func isSafeRetryMutableRPC(err error) bool { + if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable { + // not safe for mutable RPCs + // e.g. interrupted by non-transient error that client cannot handle itself, + // or transient error while the connection has already been established + return false + } + desc := rpctypes.ErrorDesc(err) + return desc == "there is no address available" || desc == "there is no connection available" +} + +type retryKVClient struct { + kc pb.KVClient +} + +// RetryKVClient implements a KVClient. +func RetryKVClient(c *Client) pb.KVClient { + return &retryKVClient{ + kc: pb.NewKVClient(c.conn), + } +} +func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { + return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + return rkv.kc.Put(ctx, in, opts...) +} + +func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + return rkv.kc.DeleteRange(ctx, in, opts...) +} + +func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + return rkv.kc.Txn(ctx, in, opts...) +} + +func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + return rkv.kc.Compact(ctx, in, opts...) +} + +type retryLeaseClient struct { + lc pb.LeaseClient +} + +// RetryLeaseClient implements a LeaseClient. +func RetryLeaseClient(c *Client) pb.LeaseClient { + return &retryLeaseClient{ + lc: pb.NewLeaseClient(c.conn), + } +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { + return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { + return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { + return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...) +} + +type retryClusterClient struct { + cc pb.ClusterClient +} + +// RetryClusterClient implements a ClusterClient. 
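+// Only the read-only MemberList call is marked repeatable; membership mutations are passed through without a retry policy.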
+func RetryClusterClient(c *Client) pb.ClusterClient { + return &retryClusterClient{ + cc: pb.NewClusterClient(c.conn), + } +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + return rcc.cc.MemberAdd(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + return rcc.cc.MemberRemove(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + return rcc.cc.MemberUpdate(ctx, in, opts...) +} + +func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) { + return rcc.cc.MemberPromote(ctx, in, opts...) +} + +type retryMaintenanceClient struct { + mc pb.MaintenanceClient +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + return &retryMaintenanceClient{ + mc: pb.NewMaintenanceClient(conn), + } +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { + return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { + return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + return rmc.mc.Defragment(ctx, in, opts...) +} + +type retryAuthClient struct { + ac pb.AuthClient +} + +// RetryAuthClient implements a AuthClient. 
+func RetryAuthClient(c *Client) pb.AuthClient { + return &retryAuthClient{ + ac: pb.NewAuthClient(c.conn), + } +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + return rac.ac.AuthEnable(ctx, in, opts...) +} + +func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + return rac.ac.AuthDisable(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + return rac.ac.UserAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + return rac.ac.UserDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + return rac.ac.UserChangePassword(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + return rac.ac.UserGrantRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + return rac.ac.UserRevokeRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + return rac.ac.RoleAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + return rac.ac.RoleDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + return rac.ac.RoleGrantPermission(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + return rac.ac.RoleRevokePermission(ctx, in, opts...) 
+} + +func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + return rac.ac.Authenticate(ctx, in, opts...) +} diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go new file mode 100644 index 000000000..2c266e55b --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go @@ -0,0 +1,392 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more +// fine grained error checking required by write-at-most-once retry semantics of etcd. + +package clientv3 + +import ( + "context" + "io" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// unaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = withVersion(ctx) + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return invoker(ctx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { + return err + } + logger.Debug( + "retrying of unary invoker", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + ) + lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) + if lastErr == nil { + return nil + } + logger.Warn( + "retrying of unary invoker failed", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + zap.Error(lastErr), + ) + if isContextError(lastErr) { + if ctx.Err() != nil { + // its the context deadline or cancellation. + return lastErr + } + // its the callCtx deadline or cancellation, in which case try again. 
+ continue + } + if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken { + gterr := c.getToken(ctx) + if gterr != nil { + logger.Warn( + "retrying of unary invoker failed to fetch new auth token", + zap.String("target", cc.Target()), + zap.Error(gterr), + ) + return gterr // lastErr must be invalid auth token + } + continue + } + if !isSafeRetry(c.lg, lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx = withVersion(ctx) + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(ctx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") + } + newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) + if err != nil { + logger.Error("streamer failed to create ClientStream", zap.Error(err)) + return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + } + retryingStreamer := &serverStreamingRetryingStream{ + client: c, + ClientStream: newStreamer, + callOpts: callOpts, + ctx: ctx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. 
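+// Because buffered sends are replayed on the reestablished stream, this wrapper is only used for
+// server-side streaming calls; the interceptor above rejects ClientStreams outright.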
+type serverStreamingRetryingStream struct { + grpc.ClientStream + client *Client + bufferedSends []interface{} // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed + ctx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + + // We start off from attempt 1, because zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil { + return err + } + newStream, err := s.reestablishStreamAndResendBuffer(s.ctx) + if err != nil { + s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err)) + return err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + } + s.setStream(newStream) + + s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr)) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.ctx.Err() != nil { + return false, err + } + // its the callCtx deadline or cancellation, in which case try again. 
+ return true, err + } + if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { + gterr := s.client.getToken(s.ctx) + if gterr != nil { + s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) + return false, err // return the original error for simplicity + } + return true, err + + } + return isSafeRetry(s.client.lg, err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error { + waitTime := time.Duration(0) + if attempt > 0 { + waitTime = callOpts.backoffFunc(attempt) + } + if waitTime > 0 { + timer := time.NewTimer(waitTime) + select { + case <-ctx.Done(): + timer.Stop() + return contextErrToGrpcErr(ctx.Err()) + case <-timer.C: + } + } + return nil +} + +// isSafeRetry returns "true", if request is safe for retry with the given error. +func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { + if isContextError(err) { + return false + } + switch callOpts.retryPolicy { + case repeatable: + return isSafeRetryImmutableRPC(err) + case nonRepeatable: + return isSafeRetryMutableRPC(err) + default: + lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + return false + } +} + +func isContextError(err error) bool { + return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Errorf(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Errorf(codes.Canceled, err.Error()) + default: + return status.Errorf(codes.Unknown, err.Error()) + } +} + +var ( + defaultOptions = &options{ + retryPolicy: nonRepeatable, + max: 0, // disable + backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + retryAuth: true, + } +) + +// backoffFunc denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type backoffFunc func(attempt uint) time.Duration + +// withRetryPolicy sets the retry policy of this call. +func withRetryPolicy(rp retryPolicy) retryOption { + return retryOption{applyFunc: func(o *options) { + o.retryPolicy = rp + }} +} + +// withMax sets the maximum number of retries on this call, or this interceptor. +func withMax(maxRetries uint) retryOption { + return retryOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc `used to control time between retries. 
+func withBackoff(bf backoffFunc) retryOption { + return retryOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +type options struct { + retryPolicy retryPolicy + max uint + backoffFunc backoffFunc + retryAuth bool +} + +// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor. +type retryOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options { + if len(retryOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range retryOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) { + for _, opt := range callOptions { + if co, ok := opt.(retryOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + return jitterUp(waitBetween, jitterFraction) + } +} diff --git a/vendor/go.etcd.io/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go new file mode 100644 index 000000000..2bb9d9a13 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/sort.go @@ -0,0 +1,37 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +type SortTarget int +type SortOrder int + +const ( + SortNone SortOrder = iota + SortAscend + SortDescend +) + +const ( + SortByKey SortTarget = iota + SortByVersion + SortByCreateRevision + SortByModRevision + SortByValue +) + +type SortOption struct { + Target SortTarget + Order SortOrder +} diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go new file mode 100644 index 000000000..c19715da4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/txn.go @@ -0,0 +1,151 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
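Editor's note: the retry plumbing above pairs a per-call retryPolicy with a backoffFunc, and backoffLinearWithJitter simply spreads each wait around a fixed base via jitterUp (defined later in utils.go). The following is a minimal standalone sketch of that jittered linear backoff idea; jitteredBackoff and the 25ms/0.10 constants mirror the defaults in options.go but are illustrative names, not part of the vendored API.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredBackoff returns a wait centered on base, adjusted by up to
// +/- jitterFraction; e.g. base=25ms, jitterFraction=0.10 yields 22.5ms..27.5ms.
func jitteredBackoff(base time.Duration, jitterFraction float64) time.Duration {
	multiplier := jitterFraction * (rand.Float64()*2 - 1) // in [-jitterFraction, +jitterFraction)
	return time.Duration(float64(base) * (1 + multiplier))
}

func main() {
	for attempt := 1; attempt <= 3; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt, jitteredBackoff(25*time.Millisecond, 0.10))
	}
}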
+ +package clientv3 + +import ( + "context" + "sync" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +// Txn is the interface that wraps mini-transactions. +// +// Txn(context.TODO()).If( +// Compare(Value(k1), ">", v1), +// Compare(Version(k1), "=", 2) +// ).Then( +// OpPut(k2,v2), OpPut(k3,v3) +// ).Else( +// OpPut(k4,v4), OpPut(k5,v5) +// ).Commit() +// +type Txn interface { + // If takes a list of comparison. If all comparisons passed in succeed, + // the operations passed into Then() will be executed. Or the operations + // passed into Else() will be executed. + If(cs ...Cmp) Txn + + // Then takes a list of operations. The Ops list will be executed, if the + // comparisons passed in If() succeed. + Then(ops ...Op) Txn + + // Else takes a list of operations. The Ops list will be executed, if the + // comparisons passed in If() fail. + Else(ops ...Op) Txn + + // Commit tries to commit the transaction. + Commit() (*TxnResponse, error) +} + +type txn struct { + kv *kv + ctx context.Context + + mu sync.Mutex + cif bool + cthen bool + celse bool + + isWrite bool + + cmps []*pb.Compare + + sus []*pb.RequestOp + fas []*pb.RequestOp + + callOpts []grpc.CallOption +} + +func (txn *txn) If(cs ...Cmp) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cif { + panic("cannot call If twice!") + } + + if txn.cthen { + panic("cannot call If after Then!") + } + + if txn.celse { + panic("cannot call If after Else!") + } + + txn.cif = true + + for i := range cs { + txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) + } + + return txn +} + +func (txn *txn) Then(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cthen { + panic("cannot call Then twice!") + } + if txn.celse { + panic("cannot call Then after Else!") + } + + txn.cthen = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.sus = append(txn.sus, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Else(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.celse { + panic("cannot call Else twice!") + } + + txn.celse = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.fas = append(txn.fas, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Commit() (*TxnResponse, error) { + txn.mu.Lock() + defer txn.mu.Unlock() + + r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} + + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) + if err != nil { + return nil, toErr(txn.ctx, err) + } + return (*TxnResponse)(resp), nil +} diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go new file mode 100644 index 000000000..b998c41b9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/utils.go @@ -0,0 +1,49 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
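Editor's note: the txn implementation above enforces If-before-Then-before-Else ordering and flags the transaction as a write whenever a branch contains a mutating Op. A hedged usage sketch of the public API this file implements follows; Compare and Value come from compare.go elsewhere in the package, and the endpoint address is an assumption, not part of this patch.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Endpoint is an assumption for the sketch; point it at a reachable cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// If "foo" is still "old", overwrite it; otherwise read it back.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value("foo"), "=", "old")).
		Then(clientv3.OpPut("foo", "new")).
		Else(clientv3.OpGet("foo", clientv3.WithLimit(1))).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("then-branch taken:", resp.Succeeded)
}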
+ +package clientv3 + +import ( + "math/rand" + "reflect" + "runtime" + "strings" + "time" +) + +// jitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. +// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +// +// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils +func jitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// Check if the provided function is being called in the op options. +func isOpFuncCalled(op string, opts []OpOption) bool { + for _, opt := range opts { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Func { + if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil { + if strings.Contains(opFunc.Name(), op) { + return true + } + } + } + } + return false +} diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go new file mode 100644 index 000000000..66e16ad63 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/watch.go @@ -0,0 +1,1035 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + EventTypeDelete = mvccpb.DELETE + EventTypePut = mvccpb.PUT + + closeSendErrTimeout = 250 * time.Millisecond +) + +type Event mvccpb.Event + +type WatchChan <-chan WatchResponse + +type Watcher interface { + // Watch watches on a key or prefix. The watched events will be returned + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. + // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed, + // and "WatchResponse" from this closed channel has zero events and nil "Err()". + // The context "ctx" MUST be canceled, as soon as watcher is no longer being used, + // to release the associated resources. + // + // If the context is "context.Background/TODO", returned "WatchChan" will + // not be closed and block until event is triggered, except when server + // returns a non-recoverable error (e.g. ErrCompacted). + // For example, when context passed with "WithRequireLeader" and the + // connected server has no leader (e.g. due to network partition), + // error "etcdserver: no leader" (ErrNoLeader) will be returned, + // and then "WatchChan" is closed with non-nil "Err()". 
+ // In order to prevent a watch stream being stuck in a partitioned node, + // make sure to wrap context with "WithRequireLeader". + // + // Otherwise, as long as the context has not been canceled or timed out, + // watch will retry on other recoverable errors forever until reconnected. + // + // TODO: explicitly set context error in the last "WatchResponse" message and close channel? + // Currently, client contexts are overwritten with "valCtx" that never closes. + // TODO(v3.4): configure watch retry policy, limit maximum retry number + // (see https://github.com/etcd-io/etcd/issues/8980) + Watch(ctx context.Context, key string, opts ...OpOption) WatchChan + + // RequestProgress requests a progress notify response be sent in all watch channels. + RequestProgress(ctx context.Context) error + + // Close closes the watcher and cancels all watch requests. + Close() error +} + +type WatchResponse struct { + Header pb.ResponseHeader + Events []*Event + + // CompactRevision is the minimum revision the watcher may receive. + CompactRevision int64 + + // Canceled is used to indicate watch failure. + // If the watch failed and the stream was about to close, before the channel is closed, + // the channel sends a final response that has Canceled set to true with a non-nil Err(). + Canceled bool + + // Created is used to indicate the creation of the watcher. + Created bool + + closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string +} + +// IsCreate returns true if the event tells that the key is newly created. +func (e *Event) IsCreate() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision +} + +// IsModify returns true if the event tells that a new value is put on existing key. +func (e *Event) IsModify() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision +} + +// Err is the error value if this WatchResponse holds an error. +func (wr *WatchResponse) Err() error { + switch { + case wr.closeErr != nil: + return v3rpc.Error(wr.closeErr) + case wr.CompactRevision != 0: + return v3rpc.ErrCompacted + case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } + return v3rpc.ErrFutureRev + } + return nil +} + +// IsProgressNotify returns true if the WatchResponse is progress notification. +func (wr *WatchResponse) IsProgressNotify() bool { + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 +} + +// watcher implements the Watcher interface +type watcher struct { + remote pb.WatchClient + callOpts []grpc.CallOption + + // mu protects the grpc streams map + mu sync.RWMutex + + // streams holds all the active grpc streams keyed by ctx value. + streams map[string]*watchGrpcStream + lg *zap.Logger +} + +// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
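+// One stream is created per context key; individual Watch calls sharing that key are multiplexed over it as substreams.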
+type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption + + // ctx controls internal remote.Watch requests + ctx context.Context + // ctxKey is the key used when looking up this stream's context + ctxKey string + cancel context.CancelFunc + + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream + + // reqc sends a watch request from Watch() to the main goroutine + reqc chan watchStreamRequest + // respc receives data from the watch client + respc chan *pb.WatchResponse + // donec closes to broadcast shutdown + donec chan struct{} + // errc transmits errors from grpc Recv to the watch stream reconnect logic + errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream + // wg is Done when all substream goroutines have exited + wg sync.WaitGroup + + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream + closeErr error + + lg *zap.Logger +} + +// watchStreamRequest is a union of the supported watch request operation types +type watchStreamRequest interface { + toPB() *pb.WatchRequest +} + +// watchRequest is issued by the subscriber to start a new watcher +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates + progressNotify bool + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool + // retc receives a chan WatchResponse once the watcher is established + retc chan chan WatchResponse +} + +// progressRequest is issued by the subscriber to request watch progress +type progressRequest struct { +} + +// watcherStream represents a registered watcher +type watcherStream struct { + // initReq is the request that initiated this request + initReq watchRequest + + // outc publishes watch responses to subscriber + outc chan WatchResponse + // recvc buffers watch responses before publishing + recvc chan *WatchResponse + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
+ closing bool + // id is the registered watch id on the grpc stream + id int64 + + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) +} + +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } + if c != nil { + w.callOpts = c.callOpts + w.lg = c.lg + } + return w +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + callOpts: w.callOpts, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan watchStreamRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), + lg: w.lg, + } + go wgs.run() + return wgs +} + +// Watch posts a watch request to run() and waits for a new watcher channel +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: string(ow.key), + end: string(ow.end), + rev: ow.rev, + progressNotify: ow.progressNotify, + fragment: ow.fragment, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) + + // find or allocate appropriate grpc watch stream + w.mu.Lock() + if w.streams == nil { + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + closeCh := make(chan WatchResponse, 1) + + // submit request + select { + case reqc <- wr: + ok = true + case <-wr.ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) 
+ } + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + // Consider context.Canceled as a successful close + if err == context.Canceled { + err = nil + } + return err +} + +// RequestProgress requests a progress notify response be sent in all watch channels. +func (w *watcher) RequestProgress(ctx context.Context) (err error) { + ctxKey := streamKeyFromCtx(ctx) + + w.mu.Lock() + if w.streams == nil { + w.mu.Unlock() + return fmt.Errorf("no stream found for context") + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + pr := &progressRequest{} + + select { + case reqc <- pr: + return nil + case <-ctx.Done(): + if err == nil { + return ctx.Err() + } + return err + case <-donec: + if wgs.closeErr != nil { + return wgs.closeErr + } + // retry; may have dropped stream from no ctxs + return w.RequestProgress(ctx) + } +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + // check watch ID for backward compatibility (<= v3.3) + if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run is the root of the goroutines for managing a watcher client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // start a stream with the etcd grpc server + if wc, 
closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + var cur *pb.WatchResponse + for { + select { + // Watch() requested + case req := <-w.reqc: + switch wreq := req.(type) { + case *watchRequest: + outc := make(chan WatchResponse, 1) + // TODO: pass custom watch ID? + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + case *progressRequest: + if err := wc.Send(wreq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + + // new events from the watch client + case pbresp := <-w.respc: + if cur == nil || pbresp.Created || pbresp.Canceled { + cur = pbresp + } else if cur != nil && cur.WatchId == pbresp.WatchId { + // merge new events + cur.Events = append(cur.Events, pbresp.Events...) + // update "Fragment" field; last response with "Fragment" == false + cur.Fragment = pbresp.Fragment + } + + switch { + case pbresp.Created: + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + + // reset for next iteration + cur = nil + + case pbresp.Canceled && pbresp.CompactRevision == 0: + delete(cancelSet, pbresp.WatchId) + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + + // reset for next iteration + cur = nil + + case cur.Fragment: + // watch response events are still fragmented + // continue to fetch next fragmented event arrival + continue + + default: + // dispatch to appropriate watch stream + ok := w.dispatchEvent(cur) + + // reset for next iteration + cur = nil + + if ok { + break + } + + // watch response on unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + if w.lg != nil { + w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId)) + } + if err := wc.Send(req); err != nil { + if w.lg != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err)) + } + } + } + + // watch client failed on Recv; spawn another if possible + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + if w.lg != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + cancelSet = make(map[int64]struct{}) + + case <-w.ctx.Done(): + return + + case 
ws := <-w.closingc: + if ws.id != -1 { + // client is closing an established watch; close it on the server proactively instead of waiting + // to close when the next message arrives + cancelSet[ws.id] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: ws.id, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + if w.lg != nil { + w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id)) + } + if err := wc.Send(req); err != nil { + if w.lg != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err)) + } + } + } + w.closeSubstream(ws) + delete(closing, ws) + // no more watchers on this stream, shutdown + if len(w.substreams)+len(w.resuming) == 0 { + return + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + // TODO: return watch ID? + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // indicate they should be broadcast. + if wr.IsProgressNotify() && pbresp.WatchId == -1 { + return w.broadcastResponse(wr) + } + + return w.unicastResponse(wr, pbresp.WatchId) + +} + +// broadcastResponse send a watch response to all watch substreams. +func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { + for _, ws := range w.substreams { + select { + case ws.recvc <- wr: + case <-ws.donec: + } + } + return true +} + +// unicastResponse sends a watch response to a specific watch substream. 
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { + ws, ok := w.substreams[watchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. 
+ if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // mark all substreams as resuming + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. + for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams waits for all substream goroutines to complete. +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +var maxBackoff = 100 * time.Millisecond + +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + backoff := time.Millisecond + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: []byte(wr.key), + RangeEnd: []byte(wr.end), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + Fragment: wr.fragment, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +// toPB converts an internal progress request structure to its protobuf WatchRequest structure. +func (pr *progressRequest) toPB() *pb.WatchRequest { + req := &pb.WatchProgressRequest{} + cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go new file mode 100644 index 000000000..f72c6a644 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. +package rpctypes diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go new file mode 100644 index 000000000..e6a281460 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -0,0 +1,235 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpctypes + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// server-side error +var ( + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() + ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() + + ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() + ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() + ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() + ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() + ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() + + ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() + ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() + + ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() + ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() + ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() + ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() + ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() + ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() + ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() + ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() + ErrGRPCPermissionDenied = 
status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() + ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() + ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() + ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() + ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() + ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + + ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() + ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err() + ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() + ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() + ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() + ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() + ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() + ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() + ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() + + errStringToError = map[string]error{ + ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, + + ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, + + ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + ErrorDesc(ErrGRPCRoleNotFound): 
ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, + ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + + ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, + ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, + } +) + +// client-side error +var ( + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) + + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) + + ErrMemberExist = Error(ErrGRPCMemberExist) + ErrPeerURLExist = Error(ErrGRPCPeerURLExist) + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) + ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) + ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) + ErrTooManyLearners = Error(ErrGRPCTooManyLearners) + + ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrUserNotFound = Error(ErrGRPCUserNotFound) + ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) + ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrRoleEmpty = Error(ErrGRPCRoleEmpty) + ErrAuthFailed = Error(ErrGRPCAuthFailed) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) + ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) + ErrLeaderChanged = Error(ErrGRPCLeaderChanged) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrUnhealthy = Error(ErrGRPCUnhealthy) + ErrCorrupt = Error(ErrGRPCCorrupt) + 
ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. +func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + ev, ok := status.FromError(verr) + var desc string + if ok { + desc = ev.Message() + } else { + desc = verr.Error() + } + return EtcdError{code: ev.Code(), desc: desc} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go new file mode 100644 index 000000000..90b8b835b --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + MetadataRequireLeaderKey = "hasleader" + MetadataHasLeader = "true" + + MetadataClientAPIVersionKey = "client-api-version" +) diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go new file mode 100644 index 000000000..8f8ac60ff --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go @@ -0,0 +1,20 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + TokenFieldNameGRPC = "token" + TokenFieldNameSwagger = "authorization" +) diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go new file mode 100644 index 000000000..9e9b42cea --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,1041 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: etcdserver.proto + +/* + Package etcdserverpb is a generated protocol buffer package. 
+ + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashKVRequest + HashKVResponse + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchProgressRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseCheckpoint + LeaseCheckpointRequest + LeaseCheckpointResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + LeaseLeasesRequest + LeaseStatus + LeaseLeasesResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + MemberPromoteRequest + MemberPromoteResponse + DefragmentRequest + DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ 
+ if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.Sorted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Request) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.ID)) + l = len(m.Method) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Path) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Val) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 2 + l = len(m.PrevValue) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 1 + sovEtcdserver(uint64(m.PrevIndex)) + if m.PrevExist != nil { + n += 2 + } + n += 1 + sovEtcdserver(uint64(m.Expiration)) + n += 2 + n += 1 + sovEtcdserver(uint64(m.Since)) + n += 2 + n += 2 + n += 2 + n += 1 + sovEtcdserver(uint64(m.Time)) + n += 3 + if m.Refresh != nil { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.NodeID)) + n += 1 + sovEtcdserver(uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEtcdserver(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEtcdserver(x uint64) (n int) { + return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Dir = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) + } + m.PrevIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PrevExist = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) + } + m.Expiration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sorted = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Quorum = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stream = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Refresh = &b + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + m.ClusterID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEtcdserver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEtcdserver + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEtcdserver(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto new file mode 100644 index 000000000..25e0aca5d --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto @@ -0,0 +1,34 @@ +syntax = "proto2"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Request { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional string Method = 2 [(gogoproto.nullable) = false]; + optional string Path = 3 [(gogoproto.nullable) = false]; + optional string Val = 4 [(gogoproto.nullable) = false]; + optional bool Dir = 5 [(gogoproto.nullable) = false]; + optional string PrevValue = 6 [(gogoproto.nullable) = false]; + optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; + optional bool PrevExist = 8 [(gogoproto.nullable) = true]; + optional int64 Expiration = 9 [(gogoproto.nullable) = false]; + optional bool Wait = 10 [(gogoproto.nullable) = false]; + optional uint64 Since = 11 [(gogoproto.nullable) = false]; + optional bool Recursive = 12 [(gogoproto.nullable) = false]; + optional bool Sorted = 13 [(gogoproto.nullable) = false]; + optional bool Quorum = 14 [(gogoproto.nullable) = false]; + optional int64 Time = 15 [(gogoproto.nullable) = false]; + optional bool Stream = 16 [(gogoproto.nullable) = false]; + optional bool Refresh = 17 [(gogoproto.nullable) = true]; +} + +message Metadata { + optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; + optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go new file mode 100644 index 000000000..b170499e4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,2127 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft_internal.proto + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList 
*AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } + +type EmptyResponse struct { +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
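The comment above explains why the type declared just below exists: the gRPC-facing AuthenticateRequest in rpc.proto carries only name and password, while this internal variant adds a simple_token that only etcdserver may fill in. A hedged sketch of that server-side step follows; the helper name and token argument are invented for illustration, and the real logic lives in etcdserver/v3_server.go.

// toInternalAuthenticate is a hypothetical helper: it copies the user-supplied
// fields and attaches the server-generated simple token, which clients must
// never be able to set themselves.
func toInternalAuthenticate(r *AuthenticateRequest, token string) *InternalAuthenticateRequest {
	return &InternalAuthenticateRequest{
		Name:        r.Name,
		Password:    r.Password,
		SimpleToken: token, // generated in the API layer, not taken from the client
	}
}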
+type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRaftInternal, []int{3} +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if len(m.Username) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.LeaseCheckpoint != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseCheckpoint.Size())) + n10, err := m.LeaseCheckpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n11, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n12, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n13, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.Authenticate != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n14, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n15, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) + n16, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n17, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n18, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n19, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n20, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n21, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if 
m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n22, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n23, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n24, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n25, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n26, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n27, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) + } + return i, nil +} + +func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RequestHeader) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + if m.V2 != nil { + l = m.V2.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Put != nil { + l = 
m.Put.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.DeleteRange != nil { + l = m.DeleteRange.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Txn != nil { + l = m.Txn.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Compaction != nil { + l = m.Compaction.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseGrant != nil { + l = m.LeaseGrant.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseRevoke != nil { + l = m.LeaseRevoke.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Alarm != nil { + l = m.Alarm.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseCheckpoint != nil { + l = m.LeaseCheckpoint.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthEnable != nil { + l = m.AuthEnable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthDisable != nil { + l = m.AuthDisable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.Authenticate != nil { + l = m.Authenticate.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserAdd != nil { + l = m.AuthUserAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserDelete != nil { + l = m.AuthUserDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGet != nil { + l = m.AuthUserGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserChangePassword != nil { + l = m.AuthUserChangePassword.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGrantRole != nil { + l = m.AuthUserGrantRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserRevokeRole != nil { + l = m.AuthUserRevokeRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserList != nil { + l = m.AuthUserList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleList != nil { + l = m.AuthRoleList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleAdd != nil { + l = m.AuthRoleAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleDelete != nil { + l = m.AuthRoleDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGet != nil { + l = m.AuthRoleGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGrantPermission != nil { + l = m.AuthRoleGrantPermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleRevokePermission != nil { + l = m.AuthRoleRevokePermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func (m *EmptyResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.SimpleToken) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func sovRaftInternal(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftInternal(x uint64) (n int) { + return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.V2 == nil { + m.V2 = &Request{} + } + if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &RangeRequest{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteRange == nil { + m.DeleteRange = &DeleteRangeRequest{} + } + if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Txn == nil { + m.Txn = &TxnRequest{} + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Compaction == nil { + 
m.Compaction = &CompactionRequest{} + } + if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseGrant == nil { + m.LeaseGrant = &LeaseGrantRequest{} + } + if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseRevoke == nil { + m.LeaseRevoke = &LeaseRevokeRequest{} + } + if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alarm == nil { + m.Alarm = &AlarmRequest{} + } + if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseCheckpoint == nil { + m.LeaseCheckpoint = &LeaseCheckpointRequest{} + } + if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := 
m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthEnable == nil { + m.AuthEnable = &AuthEnableRequest{} + } + if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1011: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthDisable == nil { + m.AuthDisable = &AuthDisableRequest{} + } + if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1012: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authenticate == nil { + m.Authenticate = &InternalAuthenticateRequest{} + } + if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserAdd == nil { + m.AuthUserAdd = &AuthUserAddRequest{} + } + if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserDelete == nil { + m.AuthUserDelete = &AuthUserDeleteRequest{} + } + 
if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGet == nil { + m.AuthUserGet = &AuthUserGetRequest{} + } + if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1103: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserChangePassword == nil { + m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} + } + if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1104: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGrantRole == nil { + m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} + } + if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1105: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserRevokeRole == nil { + m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} + } + if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1106: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserList == nil { + m.AuthUserList = &AuthUserListRequest{} + } + if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1107: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleList == nil { + m.AuthRoleList = &AuthRoleListRequest{} + } + if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1200: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleAdd == nil { + m.AuthRoleAdd = &AuthRoleAddRequest{} + } + if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1201: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleDelete == nil { + m.AuthRoleDelete = &AuthRoleDeleteRequest{} + } + if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1202: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGet == nil { + m.AuthRoleGet = &AuthRoleGetRequest{} + } + if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1203: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGrantPermission == nil { + m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} + } + if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1204: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleRevokePermission == nil { + m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} + } + if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SimpleToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftInternal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftInternal + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftInternal(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return 
iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xcb, 0x52, 0x33, 0x45, + 0x14, 0xc7, 0x49, 0xb8, 0xa6, 0x13, 0x2e, 0x36, 0xa0, 0x6d, 0xa8, 0x8a, 0x10, 0xbc, 0xe0, 0x0d, + 0xad, 0xf0, 0x00, 0x1a, 0x13, 0x0a, 0xa8, 0xa2, 0x90, 0x9a, 0xc2, 0x2a, 0xab, 0x5c, 0x8c, 0xcd, + 0xcc, 0x21, 0x19, 0x99, 0xcc, 0x8c, 0x3d, 0x9d, 0x88, 0x6f, 0xe2, 0x63, 0x78, 0xdb, 0xbb, 0x65, + 0xe1, 0x05, 0xf5, 0x05, 0x14, 0x37, 0xee, 0xbf, 0xef, 0x01, 0xbe, 0xea, 0xcb, 0xf4, 0x64, 0x92, + 0x0e, 0xbb, 0xc9, 0x39, 0xff, 0xf3, 0xfb, 0x9f, 0x99, 0x3e, 0x07, 0x1a, 0x6d, 0x32, 0x7a, 0xc3, + 0xdd, 0x20, 0xe2, 0xc0, 0x22, 0x1a, 0x1e, 0x26, 0x2c, 0xe6, 0x31, 0xae, 0x01, 0xf7, 0xfc, 0x14, + 0xd8, 0x08, 0x58, 0x72, 0x5d, 0xdf, 0xea, 0xc5, 0xbd, 0x58, 0x26, 0x3e, 0x10, 0x4f, 0x4a, 0x53, + 0xdf, 0xc8, 0x35, 0x3a, 0x52, 0x61, 0x89, 0xa7, 0x1e, 0x9b, 0x5f, 0xa2, 0x55, 0x07, 0xbe, 0x1e, + 0x42, 0xca, 0x4f, 0x81, 0xfa, 0xc0, 0xf0, 0x1a, 0x2a, 0x9f, 0x75, 0x49, 0x69, 0xb7, 0x74, 0xb0, + 0xe0, 0x94, 0xcf, 0xba, 0xb8, 0x8e, 0x56, 0x86, 0xa9, 0xb0, 0x1c, 0x00, 0x29, 0xef, 0x96, 0x0e, + 0x2a, 0x8e, 0xf9, 0x8d, 0xf7, 0xd1, 0x2a, 0x1d, 0xf2, 0xbe, 0xcb, 0x60, 0x14, 0xa4, 0x41, 0x1c, + 0x91, 0x79, 0x59, 0x56, 0x13, 0x41, 0x47, 0xc7, 0x9a, 0xbf, 0xac, 0xa3, 0xcd, 0x33, 0xdd, 0xb5, + 0x43, 0x6f, 0xb8, 0xb6, 0x9b, 0x32, 0x7a, 0x03, 0x95, 0x47, 0x2d, 0x69, 0x51, 0x6d, 0x6d, 0x1f, + 0x8e, 0xbf, 0xd7, 0xa1, 0x2e, 0x71, 0xca, 0xa3, 0x16, 0xfe, 0x10, 0x2d, 0x32, 0x1a, 0xf5, 0x40, + 0x7a, 0x55, 0x5b, 0xf5, 0x09, 0xa5, 0x48, 0x65, 0x72, 0x25, 0xc4, 0xef, 0xa0, 0xf9, 0x64, 0xc8, + 0xc9, 0x82, 0xd4, 0x93, 0xa2, 0xfe, 0x72, 0x98, 0xf5, 0xe3, 0x08, 0x11, 0xee, 0xa0, 0x9a, 0x0f, + 0x21, 0x70, 0x70, 0x95, 0xc9, 0xa2, 0x2c, 0xda, 0x2d, 0x16, 0x75, 0xa5, 0xa2, 0x60, 0x55, 0xf5, + 0xf3, 0x98, 0x30, 0xe4, 0x77, 0x11, 0x59, 0xb2, 0x19, 0x5e, 0xdd, 0x45, 0xc6, 0x90, 0xdf, 0x45, + 0xf8, 0x23, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x8f, 0x8b, 0xef, 0xb7, 0x2c, 0x4b, 0x5e, 0x2b, 0x96, + 0x74, 0x4c, 0x3e, 0xab, 0x1c, 0x2b, 0xc1, 0x1f, 0xa3, 0x6a, 0x08, 0x34, 0x05, 0xb7, 0xc7, 0x68, + 0xc4, 0xc9, 0x8a, 0x8d, 0x70, 0x2e, 0x04, 0x27, 0x22, 0x6f, 0x08, 0xa1, 0x09, 0x89, 0x77, 0x56, + 0x04, 0x06, 0xa3, 0xf8, 0x16, 0x48, 0xc5, 0xf6, 0xce, 0x12, 0xe1, 0x48, 0x81, 0x79, 0xe7, 0x30, + 0x8f, 0x89, 0x63, 0xa1, 0x21, 0x65, 0x03, 0x82, 0x6c, 0xc7, 0xd2, 0x16, 0x29, 0x73, 0x2c, 0x52, + 0x88, 0x3f, 0x45, 0x1b, 0xca, 0xd6, 0xeb, 0x83, 0x77, 0x9b, 0xc4, 0x41, 0xc4, 0x49, 0x55, 0x16, + 0xbf, 0x6e, 0xb1, 0xee, 0x18, 0x51, 0x86, 0x59, 0x0f, 0x8b, 0x71, 0x7c, 0x84, 0x96, 0xfa, 0x72, + 0x86, 0x89, 0x2f, 0x31, 0x3b, 0xd6, 0x21, 0x52, 0x63, 0xee, 0x68, 0x29, 0x6e, 0xa3, 0xaa, 0x1c, + 0x61, 0x88, 0xe8, 0x75, 0x08, 0xe4, 0x7f, 0xeb, 0x09, 0xb4, 0x87, 0xbc, 0x7f, 0x2c, 0x05, 0xe6, + 0xfb, 0x51, 0x13, 0xc2, 0x5d, 0x24, 0x07, 0xde, 0xf5, 0x83, 0x54, 0x32, 0x9e, 0x2d, 0xdb, 0x3e, + 0xa0, 0x60, 0x74, 0x95, 0xc2, 0x7c, 0x40, 0x9a, 0xc7, 0xf0, 0x85, 0xa2, 0x40, 0xc4, 0x03, 0x8f, + 0x72, 0x20, 0xcf, 0x15, 0xe5, 0xed, 0x22, 0x25, 0x5b, 0xa4, 0xf6, 0x98, 0x34, 0xc3, 0x15, 0xea, + 0xf1, 0xb1, 0xde, 0x4d, 0xb1, 0xac, 
0x2e, 0xf5, 0x7d, 0xf2, 0xeb, 0xca, 0xac, 0xb6, 0x3e, 0x4b, + 0x81, 0xb5, 0x7d, 0xbf, 0xd0, 0x96, 0x8e, 0xe1, 0x0b, 0xb4, 0x91, 0x63, 0xd4, 0x90, 0x93, 0xdf, + 0x14, 0x69, 0xdf, 0x4e, 0xd2, 0xdb, 0xa1, 0x61, 0x6b, 0xb4, 0x10, 0x2e, 0xb6, 0xd5, 0x03, 0x4e, + 0x7e, 0x7f, 0xb2, 0xad, 0x13, 0xe0, 0x53, 0x6d, 0x9d, 0x00, 0xc7, 0x3d, 0xf4, 0x6a, 0x8e, 0xf1, + 0xfa, 0x62, 0xed, 0xdc, 0x84, 0xa6, 0xe9, 0x37, 0x31, 0xf3, 0xc9, 0x1f, 0x0a, 0xf9, 0xae, 0x1d, + 0xd9, 0x91, 0xea, 0x4b, 0x2d, 0xce, 0xe8, 0x2f, 0x53, 0x6b, 0x1a, 0x7f, 0x8e, 0xb6, 0xc6, 0xfa, + 0x15, 0xfb, 0xe2, 0xb2, 0x38, 0x04, 0xf2, 0xa0, 0x3c, 0xde, 0x9c, 0xd1, 0xb6, 0xdc, 0xb5, 0x38, + 0x3f, 0xea, 0x97, 0xe8, 0x64, 0x06, 0x7f, 0x81, 0xb6, 0x73, 0xb2, 0x5a, 0x3d, 0x85, 0xfe, 0x53, + 0xa1, 0xdf, 0xb2, 0xa3, 0xf5, 0x0e, 0x8e, 0xb1, 0x31, 0x9d, 0x4a, 0xe1, 0x53, 0xb4, 0x96, 0xc3, + 0xc3, 0x20, 0xe5, 0xe4, 0x2f, 0x45, 0xdd, 0xb3, 0x53, 0xcf, 0x83, 0x94, 0x17, 0xe6, 0x28, 0x0b, + 0x1a, 0x92, 0x68, 0x4d, 0x91, 0xfe, 0x9e, 0x49, 0x12, 0xd6, 0x53, 0xa4, 0x2c, 0x68, 0x8e, 0x5e, + 0x92, 0xc4, 0x44, 0x7e, 0x5f, 0x99, 0x75, 0xf4, 0xa2, 0x66, 0x72, 0x22, 0x75, 0xcc, 0x4c, 0xa4, + 0xc4, 0xe8, 0x89, 0xfc, 0xa1, 0x32, 0x6b, 0x22, 0x45, 0x95, 0x65, 0x22, 0xf3, 0x70, 0xb1, 0x2d, + 0x31, 0x91, 0x3f, 0x3e, 0xd9, 0xd6, 0xe4, 0x44, 0xea, 0x18, 0xfe, 0x0a, 0xd5, 0xc7, 0x30, 0x72, + 0x50, 0x12, 0x60, 0x83, 0x20, 0x95, 0xff, 0x18, 0x7f, 0x52, 0xcc, 0xf7, 0x66, 0x30, 0x85, 0xfc, + 0xd2, 0xa8, 0x33, 0xfe, 0x2b, 0xd4, 0x9e, 0xc7, 0x03, 0xb4, 0x93, 0x7b, 0xe9, 0xd1, 0x19, 0x33, + 0xfb, 0x59, 0x99, 0xbd, 0x6f, 0x37, 0x53, 0x53, 0x32, 0xed, 0x46, 0xe8, 0x0c, 0x41, 0x73, 0x1d, + 0xad, 0x1e, 0x0f, 0x12, 0xfe, 0xad, 0x03, 0x69, 0x12, 0x47, 0x29, 0x34, 0x13, 0xb4, 0xf3, 0xc4, + 0x1f, 0x22, 0x8c, 0xd1, 0x82, 0xbc, 0x2e, 0x94, 0xe4, 0x75, 0x41, 0x3e, 0x8b, 0x6b, 0x84, 0xd9, + 0x4f, 0x7d, 0x8d, 0xc8, 0x7e, 0xe3, 0x3d, 0x54, 0x4b, 0x83, 0x41, 0x12, 0x82, 0xcb, 0xe3, 0x5b, + 0x50, 0xb7, 0x88, 0x8a, 0x53, 0x55, 0xb1, 0x2b, 0x11, 0xfa, 0x64, 0xeb, 0xfe, 0xdf, 0xc6, 0xdc, + 0xfd, 0x63, 0xa3, 0xf4, 0xf0, 0xd8, 0x28, 0xfd, 0xf3, 0xd8, 0x28, 0x7d, 0xf7, 0x5f, 0x63, 0xee, + 0x7a, 0x49, 0xde, 0x61, 0x8e, 0x5e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x36, 0xf0, 0x6f, 0x1b, + 0x09, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto new file mode 100644 index 000000000..7111f4572 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcdserver.proto"; +import "rpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message RequestHeader { + uint64 ID = 1; + // username is a username that is associated with an auth token of gRPC connection + string username = 2; + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + uint64 auth_revision = 3; +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. 
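As the comment above notes, the InternalRaftRequest message defined next is a union: each raft proposal sets exactly one request member, alongside an ID and an optional header. A rough usage sketch against the generated Go types from raft_internal.pb.go (illustrative only; the import alias and field values are made up):

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	// One member of the union (Put here) is populated per proposal.
	req := &pb.InternalRaftRequest{
		ID:     42,
		Header: &pb.RequestHeader{Username: "alice", AuthRevision: 7},
		Put:    &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
	}
	data, err := req.Marshal() // bytes carried as the raft proposal payload
	if err != nil {
		panic(err)
	}
	fmt.Println("proposal size:", len(data))
}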
+message InternalRaftRequest { + RequestHeader header = 100; + uint64 ID = 1; + + Request v2 = 2; + + RangeRequest range = 3; + PutRequest put = 4; + DeleteRangeRequest delete_range = 5; + TxnRequest txn = 6; + CompactionRequest compaction = 7; + + LeaseGrantRequest lease_grant = 8; + LeaseRevokeRequest lease_revoke = 9; + + AlarmRequest alarm = 10; + + LeaseCheckpointRequest lease_checkpoint = 11; + + AuthEnableRequest auth_enable = 1000; + AuthDisableRequest auth_disable = 1011; + + InternalAuthenticateRequest authenticate = 1012; + + AuthUserAddRequest auth_user_add = 1100; + AuthUserDeleteRequest auth_user_delete = 1101; + AuthUserGetRequest auth_user_get = 1102; + AuthUserChangePasswordRequest auth_user_change_password = 1103; + AuthUserGrantRoleRequest auth_user_grant_role = 1104; + AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; + AuthUserListRequest auth_user_list = 1106; + AuthRoleListRequest auth_role_list = 1107; + + AuthRoleAddRequest auth_role_add = 1200; + AuthRoleDeleteRequest auth_role_delete = 1201; + AuthRoleGetRequest auth_role_get = 1202; + AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; + AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; +} + +message EmptyResponse { +} + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +message InternalAuthenticateRequest { + string name = 1; + string password = 2; + + // simple_token is generated in API layer (etcdserver/v3_server.go) + string simple_token = 3; +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go new file mode 100644 index 000000000..31e121ee0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go @@ -0,0 +1,183 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserverpb + +import ( + "fmt" + "strings" + + proto "github.com/golang/protobuf/proto" +) + +// InternalRaftStringer implements custom proto Stringer: +// redact password, replace value fields with value_size fields. 
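The stringer types that follow exist so raft proposals can be logged without leaking passwords or value payloads. A small sketch of the intended effect, using the exported NewLoggablePutRequest defined later in this file (the printed text is approximate):

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	put := &pb.PutRequest{Key: []byte("secret-key"), Value: []byte("super-secret-value")}
	// put.String() would include the raw value bytes; the loggable wrapper
	// reports only their length, e.g. key:"secret-key" value_size:18
	fmt.Println(pb.NewLoggablePutRequest(put).String())
}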
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	case as.Request.Put != nil:
+		return fmt.Sprintf("header:<%s> put:<%s>",
+			as.Request.Header.String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
+		)
+	case as.Request.Txn != nil:
+		return fmt.Sprintf("header:<%s> txn:<%s>",
+			as.Request.Header.String(),
+			NewLoggableTxnRequest(as.Request.Txn).String(),
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+	Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+	return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+	var compare []string
+	for _, c := range as.Request.Compare {
+		switch cv := c.TargetUnion.(type) {
+		case *Compare_Value:
+			compare = append(compare, newLoggableValueCompare(c, cv).String())
+		default:
+			// nothing to redact
+			compare = append(compare, c.String())
+		}
+	}
+	var success []string
+	for _, s := range as.Request.Success {
+		success = append(success, newLoggableRequestOp(s).String())
+	}
+	var failure []string
+	for _, f := range as.Request.Failure {
+		failure = append(failure, newLoggableRequestOp(f).String())
+	}
+	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+		strings.Join(compare, " "),
+		strings.Join(success, " "),
+		strings.Join(failure, " "),
+	)
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+	Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+	return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+	switch op := as.Op.Request.(type) {
+	case *RequestOp_RequestPut:
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+	case *RequestOp_RequestTxn:
+		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+	default:
+		// nothing to redact
+	}
+	return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct { + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"` + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"` + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"` +} + +func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare { + return &loggableValueCompare{ + c.Result, + c.Target, + c.Key, + int64(len(cv.Value)), + c.RangeEnd, + } +} + +func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} } +func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) } +func (*loggableValueCompare) ProtoMessage() {} + +// loggablePutRequest implements a custom proto String to replace value bytes field with a value +// size field. +// To preserve proto encoding of the key bytes, a faked out proto type is used here. +type loggablePutRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"` + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"` + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"` + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"` + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"` +} + +func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest { + return &loggablePutRequest{ + request.Key, + int64(len(request.Value)), + request.Lease, + request.PrevKv, + request.IgnoreValue, + request.IgnoreLease, + } +} + +func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} } +func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) } +func (*loggablePutRequest) ProtoMessage() {} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go new file mode 100644 index 000000000..6cbccc797 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -0,0 +1,20088 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rpc.proto + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + authpb "go.etcd.io/etcd/auth/authpb" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
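The loggable wrappers defined in raft_internal_stringer.go above exist so that request values are redacted before they reach logs. As a minimal, hypothetical sketch (not part of the vendored sources; it only assumes the vendored import path shown in this patch), the following program contrasts the raw proto string of a PutRequest with the redacted form produced by NewLoggablePutRequest:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	// A put whose value should not be logged verbatim.
	put := &pb.PutRequest{Key: []byte("config/secret"), Value: []byte("s3cr3t")}

	// Raw proto string: includes the value bytes.
	fmt.Println(put.String())
	// Loggable form: the value is replaced by value_size.
	fmt.Println(pb.NewLoggablePutRequest(put).String())
}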
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 + AlarmType_CORRUPT AlarmType = 2 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", + 2: "CORRUPT", +} +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, + "CORRUPT": 2, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", +} +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{1, 1} +} + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 + Compare_LEASE Compare_CompareTarget = 4 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", + 4: "LEASE", +} +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, + "LEASE": 4, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } + +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. 
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. + WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{21, 0} +} + +type AlarmRequest_AlarmAction int32 + +const ( + AlarmRequest_GET AlarmRequest_AlarmAction = 0 + AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 + AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 +) + +var AlarmRequest_AlarmAction_name = map[int32]string{ + 0: "GET", + 1: "ACTIVATE", + 2: "DEACTIVATE", +} +var AlarmRequest_AlarmAction_value = map[string]int32{ + "GET": 0, + "ACTIVATE": 1, + "DEACTIVATE": 2, +} + +func (x AlarmRequest_AlarmAction) String() string { + return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) +} +func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{54, 0} +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // member_id is the ID of the member which sent the response. + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + // revision is the key-value store revision when the request was applied. + // For watch progress responses, the header.revision indicates progress. All future events + // recieved in this stream are guaranteed to have a higher revision number than the + // header.revision number. + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + // raft_term is the raft term when the request was applied. + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. 
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + // sort_order is the order for returned sorted results. + SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` + // sort_target is the key-value field to use for sorting. + SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` + // keys_only when set returns only the keys and not the values. + KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` + // count_only when set returns only the count of the keys in the range. + CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. 
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } + +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` + // more indicates if there are more keys to return in the requested range. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` + // count is set to the number of keys within the range when requested. + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // key is the first key to delete in the range. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. 
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } + +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. 
+ // + // Types that are valid to be assigned to Request: + // *RequestOp_RequestRange + // *RequestOp_RequestPut + // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn + Request isRequestOp_Request `protobuf_oneof:"request"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } + +type isRequestOp_Request interface { + isRequestOp_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` +} +type RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` +} +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} + +func (m *RequestOp) GetRequest() isRequestOp_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { + return x.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { + return x.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { + return x.RequestDeleteRange + } + return nil +} + +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
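As a usage note for the RequestOp union above, the following hypothetical sketch (not part of the generated file) wraps a PutRequest in the oneof and reads it back through the generated getters, which return the zero value for union members that are not set:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Wrap a PutRequest in the RequestOp oneof, as a transaction body would.
	op := &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
		},
	}

	fmt.Println(op.GetRequestPut())          // the put we stored
	fmt.Println(op.GetRequestRange() == nil) // true: that union member is unset
}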
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), + } +} + +func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestRange); err != nil { + return err + } + case *RequestOp_RequestPut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestPut); err != nil { + return err + } + case *RequestOp_RequestDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { + return err + } + case *RequestOp_RequestTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RequestOp.Request has unexpected type %T", x) + } + return nil +} + +func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RequestOp) + switch tag { + case 1: // request.request_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestRange{msg} + return true, err + case 2: // request.request_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestPut{msg} + return true, err + case 3: // request.request_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestDeleteRange{msg} + return true, err + case 4: // request.request_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestTxn{msg} + return true, err + default: + return false, nil + } +} + +func _RequestOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + s := proto.Size(x.RequestRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestPut: + s := proto.Size(x.RequestPut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestDeleteRange: + s := proto.Size(x.RequestDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestTxn: + s := proto.Size(x.RequestTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. 
+ // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn + Response isResponseOp_Response `protobuf_oneof:"response"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } + +type isResponseOp_Response interface { + isResponseOp_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` +} +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponse() isResponseOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { + return x.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { + return x.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { + return x.ResponseDeleteRange + } + return nil +} + +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
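Correspondingly, a caller unpacks a ResponseOp by type-switching on its Response union. The short sketch below is illustrative only (the describe helper is hypothetical, not part of the package):

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

// describe reports which member of the ResponseOp union is populated.
func describe(op *pb.ResponseOp) string {
	switch r := op.Response.(type) {
	case *pb.ResponseOp_ResponseRange:
		return fmt.Sprintf("range response with %d kvs", len(r.ResponseRange.Kvs))
	case *pb.ResponseOp_ResponsePut:
		return "put response"
	case *pb.ResponseOp_ResponseDeleteRange:
		return fmt.Sprintf("delete_range response, deleted=%d", r.ResponseDeleteRange.Deleted)
	case *pb.ResponseOp_ResponseTxn:
		return "nested txn response"
	default:
		return "empty response"
	}
}

func main() {
	op := &pb.ResponseOp{
		Response: &pb.ResponseOp_ResponseDeleteRange{
			ResponseDeleteRange: &pb.DeleteRangeResponse{Deleted: 2},
		},
	}
	fmt.Println(describe(op)) // delete_range response, deleted=2
}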
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), + } +} + +func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseRange); err != nil { + return err + } + case *ResponseOp_ResponsePut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponsePut); err != nil { + return err + } + case *ResponseOp_ResponseDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { + return err + } + case *ResponseOp_ResponseTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) + } + return nil +} + +func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseOp) + switch tag { + case 1: // response.response_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseRange{msg} + return true, err + case 2: // response.response_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponsePut{msg} + return true, err + case 3: // response.response_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseDeleteRange{msg} + return true, err + case 4: // response.response_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseTxn{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + s := proto.Size(x.ResponseRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponsePut: + s := proto.Size(x.ResponsePut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseDeleteRange: + s := proto.Size(x.ResponseDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseTxn: + s := proto.Size(x.ResponseTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Compare struct { + // result is logical comparison operation for this comparison. 
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + // target is the key-value field to inspect for the comparison. + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + // key is the subject key for the comparison operation. + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + // *Compare_Lease + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` +} +type Compare_Lease struct { + Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} +func (*Compare_Lease) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +func (m *Compare) GetLease() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { + return x.Lease + } + return 0 +} + +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
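The Compare message above is the guard half of etcd's transaction primitive. Together with the TxnRequest message defined further below, a conditional write can be expressed as in this minimal, hypothetical sketch (key names and values are illustrative only):

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Guard: proceed only if key "foo" currently holds the value "bar".
	guard := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_VALUE,
		Key:         []byte("foo"),
		TargetUnion: &pb.Compare_Value{Value: []byte("bar")},
	}

	// If the guard holds, overwrite the key; otherwise read it back.
	txn := &pb.TxnRequest{
		Compare: []*pb.Compare{guard},
		Success: []*pb.RequestOp{{Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{Key: []byte("foo"), Value: []byte("baz")}}}},
		Failure: []*pb.RequestOp{{Request: &pb.RequestOp_RequestRange{
			RequestRange: &pb.RangeRequest{Key: []byte("foo")}}}},
	}
	fmt.Println(txn.String())
}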
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + (*Compare_Lease)(nil), + } +} + +func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ModRevision)) + case *Compare_Value: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Value) + case *Compare_Lease: + _ = b.EncodeVarint(8<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Lease)) + case nil: + default: + return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) + } + return nil +} + +func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Compare) + switch tag { + case 4: // target_union.version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Version{int64(x)} + return true, err + case 5: // target_union.create_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_CreateRevision{int64(x)} + return true, err + case 6: // target_union.mod_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_ModRevision{int64(x)} + return true, err + case 7: // target_union.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TargetUnion = &Compare_Value{x} + return true, err + case 8: // target_union.lease + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Lease{int64(x)} + return true, err + default: + return false, nil + } +} + +func _Compare_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ModRevision)) + case *Compare_Value: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *Compare_Lease: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Lease)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. 
All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +type TxnRequest struct { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` + // success is a list of requests which will be applied when compare evaluates to true. + Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` + // failure is a list of requests which will be applied when compare evaluates to false. + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` +} + +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } + +func (m *TxnRequest) GetCompare() []*Compare { + if m != nil { + return m.Compare + } + return nil +} + +func (m *TxnRequest) GetSuccess() []*RequestOp { + if m != nil { + return m.Success + } + return nil +} + +func (m *TxnRequest) GetFailure() []*RequestOp { + if m != nil { + return m.Failure + } + return nil +} + +type TxnResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // succeeded is set to true if the compare evaluated to true or false otherwise. + Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. 
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` +} + +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } + +func (m *TxnResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + +func (m *TxnResponse) GetResponses() []*ResponseOp { + if m != nil { + return m.Responses + } + return nil +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +type CompactionRequest struct { + // revision is the key-value store revision for the compaction operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct { +} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } + +type HashKVRequest struct { + // revision is the key-value store revision for the hash operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } + +func (m *HashKVRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +type HashKVResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. 
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + // compact_revision is the compacted revision of key-value store when hash begins. + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` +} + +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } + +func (m *HashKVResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashKVResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +func (m *HashKVResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's KV's backend. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct { +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // remaining_bytes is the number of blob bytes to be sent after this message + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` + // blob contains the next chunk of the snapshot in the snapshot stream. + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + // request_union is a request to either create a new watcher or cancel an existing watcher. 
+ // + // Types that are valid to be assigned to RequestUnion: + // *WatchRequest_CreateRequest + // *WatchRequest_CancelRequest + // *WatchRequest_ProgressRequest + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } + +type isWatchRequest_RequestUnion interface { + isWatchRequest_RequestUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` +} +type WatchRequest_ProgressRequest struct { + ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,oneof"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { + if m != nil { + return m.RequestUnion + } + return nil +} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { + return x.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { + return x.CancelRequest + } + return nil +} + +func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok { + return x.ProgressRequest + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
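The watch request union above is typically populated with the WatchCreateRequest defined just below. The hypothetical sketch that follows builds one that watches a key prefix by computing the "key plus one" range_end described in the field comments (prefixEnd is an illustrative helper, not part of this package):

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

// prefixEnd returns the smallest key that sorts after every key with the
// given prefix, i.e. the prefix "plus one" described in the field comments.
func prefixEnd(prefix []byte) []byte {
	end := append([]byte{}, prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// Every byte is 0xff: fall back to '\0', meaning "all keys >= prefix".
	return []byte{0}
}

func main() {
	create := &pb.WatchCreateRequest{
		Key:      []byte("foo/"),
		RangeEnd: prefixEnd([]byte("foo/")), // watches every key under "foo/"
	}
	req := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{CreateRequest: create},
	}
	fmt.Println(req.GetCreateRequest().String())
}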
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + (*WatchRequest_ProgressRequest)(nil), + } +} + +func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateRequest); err != nil { + return err + } + case *WatchRequest_CancelRequest: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CancelRequest); err != nil { + return err + } + case *WatchRequest_ProgressRequest: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProgressRequest); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) + } + return nil +} + +func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*WatchRequest) + switch tag { + case 1: // request_union.create_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCreateRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CreateRequest{msg} + return true, err + case 2: // request_union.cancel_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCancelRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CancelRequest{msg} + return true, err + case 3: // request_union.progress_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchProgressRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_ProgressRequest{msg} + return true, err + default: + return false, nil + } +} + +func _WatchRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + s := proto.Size(x.CreateRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_CancelRequest: + s := proto.Size(x.CancelRequest) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_ProgressRequest: + s := proto.Size(x.ProgressRequest) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. 
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+ StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+ // filters filter the events on the server side before they are sent back to the watcher.
+ Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+ // If prev_kv is set, created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // fragment enables splitting large revisions into multiple watch responses.
+ Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+}
+
+func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage() {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} }
+
+func (m *WatchCreateRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+ if m != nil {
+ return m.StartRevision
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+ if m != nil {
+ return m.ProgressNotify
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+ if m != nil {
+ return m.Fragment
+ }
+ return false
+}
+
+type WatchCancelRequest struct {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+}
+
+func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage() {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} }
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+type WatchProgressRequest struct {
+}
+
+func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage() {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} }
+
+type WatchResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // watch_id is the ID of the watcher that corresponds to the response.
+ WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+ // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher.
+ Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // cancel_reason indicates the reason for canceling the watcher.
+ CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+ // fragment is true if large watch response was split over multiple responses.
+ Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest struct { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseCheckpoint struct { + // ID is the lease ID to checkpoint. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Remaining_TTL is the remaining time until expiry of the lease. + Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` +} + +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseCheckpoint) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.Remaining_TTL + } + return 0 +} + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } + +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the new time-to-live for the lease. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. 
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type LeaseLeasesRequest struct { +} + +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } + +type LeaseStatus struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } + +func (m *LeaseStatus) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseLeasesResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` +} + +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } + +func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { + if m != nil { + return m.Leases + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` + // isLearner indicates if the member is raft learner. 
+ IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` + // isLearner indicates if the added member is raft learner. + IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { +} + +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members associated with the cluster. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after promoting the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } + +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct { +} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. 
+ Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. + Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + // memberID is the ID of the member associated with the raised alarm. + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm which has been raised. + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. 
+ DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft committed index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. + RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } + +func (m *StatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} + +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse + } + return 0 +} + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type AuthEnableRequest struct { +} + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } + +type AuthDisableRequest struct { +} + +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } + +type AuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthenticateRequest) Reset() { *m = 
AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserAddRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options + } + return nil +} + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } + +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserDeleteRequest struct { + // name is the name of the user to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserChangePasswordRequest struct { + // name is the name of the user whose password is being changed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // password is the new password for the user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } +func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordRequest) ProtoMessage() {} +func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{65} +} + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserGrantRoleRequest struct { + // user is the name of the user which should be granted a given role. 
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + // role is the name of the role to grant to the user. + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserRevokeRoleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleAddRequest struct { + // name is the name of the role to add to the authentication system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthRoleGetRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserListRequest struct { +} + +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } + +type AuthRoleListRequest struct { +} + +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } + +type AuthRoleDeleteRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return 
proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleGrantPermissionRequest struct { + // name is the name of the role which will be granted the permission. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // perm is the permission to grant to the role. + Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } +func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} +func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{73} +} + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleRevokePermissionRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } +func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} +func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{74} +} + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +type AuthEnableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } + +func (m *AuthEnableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthDisableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } + +func (m *AuthDisableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthenticateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // token is an authorized token that can be used in succeeding RPCs + Token string 
`protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` +} + +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } + +func (m *AuthenticateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +type AuthUserAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } + +func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } + +func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } + +func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserChangePasswordResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } +func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordResponse) ProtoMessage() {} +func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{81} +} + +func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGrantRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } + +func (m 
*AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserRevokeRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{83} } + +func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{84} } + +func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{85} } + +func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{86} } + +func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` +} + +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{87} } + +func (m *AuthUserListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + +type AuthRoleDeleteResponse struct { + Header *ResponseHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{88} } + +func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGrantPermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } +func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} +func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{89} +} + +func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleRevokePermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } +func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} +func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{90} +} + +func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") + proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") + proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") + proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") + proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") + proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") + proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") + proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") + proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") + proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") + proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") + 
proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest") + proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") + proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") + proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") + proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") + proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") + proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") + proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") + proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") + proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") + proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") + proto.RegisterType((*Member)(nil), "etcdserverpb.Member") + proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") + proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") + proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") + proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") + proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") + proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") + proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") + proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") + proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") + proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") + proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") + proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") + proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") + proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") + proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") + proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") + proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") + proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") + proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") + proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") + proto.RegisterType((*AuthUserChangePasswordRequest)(nil), 
"etcdserverpb.AuthUserChangePasswordRequest") + proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") + proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") + proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") + proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") + proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") + proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") + proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") + proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") + proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") + proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") + proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") + proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") + proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") + proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") + proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") + proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") + proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") + proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") + proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") + proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") + proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") + proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") + proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") + proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") + proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for KV service + +type KVClient interface { + // Range gets the keys in the range from the key-value store. + Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) +} + +type kVClient struct { + cc *grpc.ClientConn +} + +func NewKVClient(cc *grpc.ClientConn) KVClient { + return &kVClient{cc} +} + +func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { + out := new(RangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { + out := new(DeleteRangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { + out := new(TxnResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { + out := new(CompactionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for KV service + +type KVServer interface { + // Range gets the keys in the range from the key-value store. + Range(context.Context, *RangeRequest) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. 
+ Put(context.Context, *PutRequest) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(context.Context, *TxnRequest) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) +} + +func RegisterKVServer(s *grpc.Server, srv KVServer) { + s.RegisterService(&_KV_serviceDesc, srv) +} + +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Range", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DeleteRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/DeleteRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TxnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Txn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Txn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KV_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.KV", + HandlerType: (*KVServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, + { + MethodName: "Put", + Handler: _KV_Put_Handler, + }, + { + MethodName: "DeleteRange", + Handler: _KV_DeleteRange_Handler, + }, + { + MethodName: "Txn", + Handler: _KV_Txn_Handler, + }, + { + MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Watch service + +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. 
+ Watch(Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Lease service + +type LeaseClient interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. + LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) +} + +type leaseClient struct { + cc *grpc.ClientConn +} + +func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { + return &leaseClient{cc} +} + +func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { + out := new(LeaseGrantResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { + out := new(LeaseRevokeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
+ if err != nil { + return nil, err + } + x := &leaseLeaseKeepAliveClient{stream} + return x, nil +} + +type Lease_LeaseKeepAliveClient interface { + Send(*LeaseKeepAliveRequest) error + Recv() (*LeaseKeepAliveResponse, error) + grpc.ClientStream +} + +type leaseLeaseKeepAliveClient struct { + grpc.ClientStream +} + +func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { + m := new(LeaseKeepAliveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { + out := new(LeaseLeasesResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lease service + +type LeaseServer interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(Lease_LeaseKeepAliveServer) error + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. 
+ LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) +} + +func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { + s.RegisterService(&_Lease_serviceDesc, srv) +} + +func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseGrantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseGrant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseGrant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseRevokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseRevoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseRevoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) +} + +type Lease_LeaseKeepAliveServer interface { + Send(*LeaseKeepAliveResponse) error + Recv() (*LeaseKeepAliveRequest, error) + grpc.ServerStream +} + +type leaseLeaseKeepAliveServer struct { + grpc.ServerStream +} + +func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { + m := new(LeaseKeepAliveRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseLeases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lease_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Lease", + 
HandlerType: (*LeaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LeaseGrant", + Handler: _Lease_LeaseGrant_Handler, + }, + { + MethodName: "LeaseRevoke", + Handler: _Lease_LeaseRevoke_Handler, + }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, + { + MethodName: "LeaseLeases", + Handler: _Lease_LeaseLeases_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "LeaseKeepAlive", + Handler: _Lease_LeaseKeepAlive_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Cluster service + +type ClusterClient interface { + // MemberAdd adds a member into the cluster. + MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) +} + +type clusterClient struct { + cc *grpc.ClientConn +} + +func NewClusterClient(cc *grpc.ClientConn) ClusterClient { + return &clusterClient{cc} +} + +func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { + out := new(MemberAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { + out := new(MemberRemoveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { + out := new(MemberUpdateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { + out := new(MemberListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Cluster service + +type ClusterServer interface { + // MemberAdd adds a member into the cluster. + MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. 
+ MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) +} + +func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { + s.RegisterService(&_Cluster_serviceDesc, srv) +} + +func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberRemoveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Cluster_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Cluster", + HandlerType: (*ClusterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MemberAdd", + Handler: _Cluster_MemberAdd_Handler, + }, + { + MethodName: "MemberRemove", + Handler: _Cluster_MemberRemove_Handler, + }, + { + MethodName: "MemberUpdate", + Handler: _Cluster_MemberUpdate_Handler, + }, + { + MethodName: "MemberList", + Handler: _Cluster_MemberList_Handler, + }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Maintenance service + +type MaintenanceClient interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) + // Status gets the status of the member. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) +} + +type maintenanceClient struct { + cc *grpc.ClientConn +} + +func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { + return &maintenanceClient{cc} +} + +func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { + out := new(AlarmResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { + out := new(DefragmentResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { + out := new(HashResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { + out := new(HashKVResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Maintenance service + +type MaintenanceServer interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + // Status gets the status of the member. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(context.Context, *HashRequest) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. 
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashKVRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).HashKV(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/HashKV", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return 
srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} + +type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + { + MethodName: "HashKV", + Handler: _Maintenance_HashKV_Handler, + }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Auth service + +type AuthClient interface { + // AuthEnable enables authentication. + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. 
+ UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Auth service + +type AuthServer interface { + // AuthEnable enables authentication. + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. 
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. + UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. 
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: 
_Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: "RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) + } + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) + } + if m.Revision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + } + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeysOnly { + dAtA[i] = 0x40 + i++ + if m.KeysOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) + } + if 
m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) + } + return i, nil +} + +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.More { + dAtA[i] = 0x18 + i++ + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) + } + return i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PrevKv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.PrevKv { + 
dAtA[i] = 0x18 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Deleted != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RequestOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestPut != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ResponseOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Response != nil { + nn10, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + return i, nil +} + +func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponsePut != 
nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *Compare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Compare) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) + } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.TargetUnion != nil { + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil +} +func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil +} +func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil +} +func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Value != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} +func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x40 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + return i, nil +} +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Success) > 0 { + for _, msg := range m.Success { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Succeeded { + dAtA[i] = 0x10 + i++ + if m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.Physical { + dAtA[i] = 0x10 + i++ + if m.Physical { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *HashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + return i, nil +} + +func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + if m.CompactRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, 
i, uint64(m.CompactRevision)) + } + return i, nil +} + +func (m *HashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + return i, nil +} + +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.RemainingBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) + } + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) + } + return i, nil +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequestUnion != nil { + nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn21 + } + return i, nil +} + +func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CreateRequest != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} +func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CancelRequest != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} +func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ProgressRequest != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ProgressRequest.Size())) + n24, err := m.ProgressRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) + } + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Filters) > 0 { + dAtA26 := make([]byte, len(m.Filters)*10) + var j25 int + for _, num := range m.Filters { + for num >= 1<<7 { + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j25++ + } + dAtA26[j25] = uint8(num) + j25++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j25)) + i += copy(dAtA[i:], dAtA26[:j25]) + } + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.WatchId != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Fragment { + dAtA[i] = 0x40 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + return i, nil +} + +func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Canceled { + dAtA[i] = 0x20 + i++ + if m.Canceled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if m.Fragment { + dAtA[i] = 0x38 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) + } + return i, nil +} + +func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, msg := range m.Checkpoints { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Keys { + dAtA[i] = 0x10 + i++ + if m.Keys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + return i, nil +} + +func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if len(m.Leases) > 0 { + for _, msg := range m.Leases { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x28 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x10 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.Member != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n35, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if len(m.Members) > 0 { + for _, msg 
:= range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + } + return i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n41, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + } + if m.MemberID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if len(m.Alarms) > 0 { + for _, msg := range m.Alarms { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) + } + if m.Leader != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) + } + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + if m.RaftAppliedIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.DbSizeInUse != 0 { + dAtA[i] 
= 0x48 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + } + if m.IsLearner { + dAtA[i] = 0x50 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Options.Size())) + n44, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Perm != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) + n45, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil +} + +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + return i, nil +} + +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if len(m.Token) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + return i, nil +} + +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + return i, nil +} + +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + return i, nil +} + +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + return i, nil +} + +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n54, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + return i, nil +} + +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n55, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + return i, nil +} + +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + if len(m.Perm) > 0 { + for _, msg := range m.Perm { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n57, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n58, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if len(m.Users) > 0 { + for _, s := range m.Users { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n59, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + return i, nil +} + +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + 
i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n60, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n61, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + return i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResponseHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRpc(uint64(m.ClusterId)) + } + if m.MemberId != 0 { + n += 1 + sovRpc(uint64(m.MemberId)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *RangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.SortOrder != 0 { + n += 1 + sovRpc(uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + n += 1 + sovRpc(uint64(m.SortTarget)) + } + if m.Serializable { + n += 2 + } + if m.KeysOnly { + n += 2 + } + if m.CountOnly { + n += 2 + } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } + return n +} + +func (m *RangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.More { + n += 2 + } + if m.Count != 0 { + n += 1 + sovRpc(uint64(m.Count)) + } + return n +} + +func (m *PutRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovRpc(uint64(m.Lease)) + } + if m.PrevKv { + n += 2 + } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } + return n +} + +func (m *PutResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *DeleteRangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *DeleteRangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Deleted != 0 { + 
n += 1 + sovRpc(uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *RequestOp) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + return n +} + +func (m *RequestOp_RequestRange) Size() (n int) { + var l int + _ = l + if m.RequestRange != nil { + l = m.RequestRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestPut) Size() (n int) { + var l int + _ = l + if m.RequestPut != nil { + l = m.RequestPut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestDeleteRange) Size() (n int) { + var l int + _ = l + if m.RequestDeleteRange != nil { + l = m.RequestDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestTxn) Size() (n int) { + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp) Size() (n int) { + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *ResponseOp_ResponseRange) Size() (n int) { + var l int + _ = l + if m.ResponseRange != nil { + l = m.ResponseRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponsePut) Size() (n int) { + var l int + _ = l + if m.ResponsePut != nil { + l = m.ResponsePut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + var l int + _ = l + if m.ResponseDeleteRange != nil { + l = m.ResponseDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseTxn) Size() (n int) { + var l int + _ = l + if m.ResponseTxn != nil { + l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare) Size() (n int) { + var l int + _ = l + if m.Result != 0 { + n += 1 + sovRpc(uint64(m.Result)) + } + if m.Target != 0 { + n += 1 + sovRpc(uint64(m.Target)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.TargetUnion != nil { + n += m.TargetUnion.Size() + } + l = len(m.RangeEnd) + if l > 0 { + n += 2 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *Compare_Version) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare_Lease) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Lease)) + return n +} +func (m *TxnRequest) Size() (n int) { + var l int + _ = l + if len(m.Compare) > 0 { + for _, e := range m.Compare { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Success) > 0 { + for _, e := range m.Success { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Failure) > 0 { + for _, e := range m.Failure { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *TxnResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Succeeded { + n += 2 + } + if len(m.Responses) > 0 { + for _, e := range 
m.Responses { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *CompactionRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.Physical { + n += 2 + } + return n +} + +func (m *CompactionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *HashRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *HashKVRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + return n +} + +func (m *HashKVResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + return n +} + +func (m *HashResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + return n +} + +func (m *SnapshotRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SnapshotResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RemainingBytes != 0 { + n += 1 + sovRpc(uint64(m.RemainingBytes)) + } + l = len(m.Blob) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if m.RequestUnion != nil { + n += m.RequestUnion.Size() + } + return n +} + +func (m *WatchRequest_CreateRequest) Size() (n int) { + var l int + _ = l + if m.CreateRequest != nil { + l = m.CreateRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_CancelRequest) Size() (n int) { + var l int + _ = l + if m.CancelRequest != nil { + l = m.CancelRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_ProgressRequest) Size() (n int) { + var l int + _ = l + if m.ProgressRequest != nil { + l = m.ProgressRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchCreateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.StartRevision != 0 { + n += 1 + sovRpc(uint64(m.StartRevision)) + } + if m.ProgressNotify { + n += 2 + } + if len(m.Filters) > 0 { + l = 0 + for _, e := range m.Filters { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PrevKv { + n += 2 + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Fragment { + n += 2 + } + return n +} + +func (m *WatchCancelRequest) Size() (n int) { + var l int + _ = l + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + return n +} + +func (m *WatchProgressRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *WatchResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Created { + n += 2 + } + if m.Canceled { + n += 2 + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Fragment { + n += 2 + } + if len(m.Events) > 0 { + for 
_, e := range m.Events { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseGrantRequest) Size() (n int) { + var l int + _ = l + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseGrantResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseRevokeRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseCheckpoint) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + n += 1 + sovRpc(uint64(m.Remaining_TTL)) + } + return n +} + +func (m *LeaseCheckpointRequest) Size() (n int) { + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, e := range m.Checkpoints { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseCheckpointResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseKeepAliveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseKeepAliveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + return n +} + +func (m *LeaseTimeToLiveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseLeasesRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *LeaseStatus) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseLeasesResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *Member) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *MemberAddRequest) Size() (n int) { + var l 
int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *MemberAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberRemoveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberRemoveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *MemberListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberPromoteRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberPromoteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DefragmentResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *MoveLeaderRequest) Size() (n int) { + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AlarmRequest) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmMember) Size() (n int) { + var l int + _ = l + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Alarms) > 0 { + for _, e := range m.Alarms { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n 
+} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSize != 0 { + n += 1 + sovRpc(uint64(m.DbSize)) + } + if m.Leader != 0 { + n += 1 + sovRpc(uint64(m.Leader)) + } + if m.RaftIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + if m.RaftAppliedIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } + if m.IsLearner { + n += 2 + } + return n +} + +func (m *AuthEnableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthDisableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Perm != nil { + l = m.Perm.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + 
sovRpc(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthEnableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthDisableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthenticateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Perm) > 0 { + for _, e := range m.Perm { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func sovRpc(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 
1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) + } + m.SortOrder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) + } + m.SortTarget = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Serializable = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeysOnly = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + m.Deleted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Deleted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOp) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestPut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l 
{ + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponsePut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Compare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + m.Target = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Version{v} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_CreateRevision{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_ModRevision{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Lease{v} + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compare = append(m.Compare, &Compare{}) + if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, &RequestOp{}) + if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failure = append(m.Failure, &RequestOp{}) + if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Succeeded = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &ResponseOp{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Physical = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) + } + m.RemainingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Blob == nil { + m.Blob = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCreateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CreateRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCancelRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CancelRequest{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchProgressRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_ProgressRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) + } + m.StartRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressNotify = bool(v != 0) + case 5: + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) + } + m.Remaining_TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Remaining_TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) + if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GrantedTTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseStatus) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &LeaseStatus{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Member == nil { + m.Member = &Member{} + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alarms = append(m.Alarms, &AlarmMember{}) + if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) + } + m.DbSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSize |= 
(int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) + } + m.RaftAppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftAppliedIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) + } + m.DbSizeInUse = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSizeInUse |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &authpb.UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListRequest) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perm == nil { + m.Perm = &authpb.Permission{} + } + if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Perm = append(m.Perm, &authpb.Permission{}) + if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRpc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRpc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } + +var fileDescriptorRpc = []byte{ + // 3928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x23, 0xc9, + 0x75, 0x56, 0x93, 0xe2, 0xed, 0xf0, 0x22, 0xaa, 0x74, 0x19, 0x0e, 0x67, 0x46, 0xa3, 0xad, 0xd9, + 0xd9, 0xd5, 0xce, 0xec, 0x8a, 0x6b, 0xd9, 0x4e, 0x80, 0x49, 0xe2, 0x58, 0x23, 0x71, 0x67, 0xb4, + 0xd2, 0x88, 0xda, 0x16, 0x67, 0xf6, 0x02, 0x23, 0x42, 0x8b, 0x2c, 0x49, 0x1d, 0x91, 0xdd, 0x74, + 0x77, 0x93, 0x23, 0x6d, 0x2e, 0x0e, 0x0c, 0xc7, 0x40, 0xf2, 0x68, 0x03, 0x41, 0xf2, 0x90, 0xa7, + 0x20, 0x08, 0xfc, 0x90, 0xe7, 0x00, 0xf9, 0x05, 0x79, 0xca, 0x05, 0xf9, 0x03, 0xc1, 0xc6, 0x2f, + 0xc9, 0xaf, 0x30, 0xea, 0xd6, 0x5d, 0x7d, 0xa3, 0xc6, 0xa6, 0x77, 0x5f, 0xa4, 0xae, 0x53, 0xa7, + 0xce, 0x39, 0x75, 0xaa, 0xea, 0x9c, 0xd3, 0x5f, 0x17, 0xa1, 0xe4, 0x8c, 0x7a, 0x9b, 0x23, 0xc7, + 0xf6, 0x6c, 0x54, 0x21, 0x5e, 0xaf, 0xef, 0x12, 0x67, 0x42, 0x9c, 0xd1, 0x69, 0x73, 0xf9, 0xdc, + 0x3e, 0xb7, 0x59, 0x47, 0x8b, 0x3e, 0x71, 0x9e, 0xe6, 0x6d, 0xca, 0xd3, 0x1a, 0x4e, 0x7a, 0x3d, + 0xf6, 0x67, 0x74, 0xda, 0xba, 0x9c, 0x88, 0xae, 0x3b, 0xac, 0xcb, 0x18, 0x7b, 0x17, 0xec, 0xcf, + 0xe8, 0x94, 0xfd, 0x13, 0x9d, 0x77, 0xcf, 0x6d, 0xfb, 0x7c, 0x40, 0x5a, 0xc6, 0xc8, 0x6c, 0x19, + 0x96, 0x65, 0x7b, 0x86, 0x67, 0xda, 0x96, 0xcb, 0x7b, 0xf1, 0x5f, 0x6a, 0x50, 0xd3, 0x89, 0x3b, + 0xb2, 0x2d, 0x97, 0x3c, 0x27, 0x46, 0x9f, 0x38, 0xe8, 0x1e, 0x40, 0x6f, 0x30, 0x76, 0x3d, 0xe2, + 0x9c, 0x98, 0xfd, 0x86, 0xb6, 0xae, 0x6d, 0xcc, 0xeb, 0x25, 0x41, 0xd9, 0xeb, 0xa3, 0x3b, 0x50, + 0x1a, 0x92, 0xe1, 0x29, 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf5, 0x51, 0x13, 0x8a, 0x0e, + 0x99, 0x98, 0xae, 0x69, 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, + 0x67, 0xde, 0x89, 0x47, 0x9c, 0x61, 0x63, 0x9e, 0x0f, 0xa4, 0x84, 0x2e, 0x71, 0x86, 0xf8, 0x27, + 0x39, 0xa8, 0xe8, 0x86, 0x75, 0x4e, 0x74, 0xf2, 0xc3, 0x31, 0x71, 0x3d, 0x54, 0x87, 0xec, 0x25, + 0xb9, 0x66, 0xea, 0x2b, 0x3a, 0x7d, 0xe4, 0xe3, 0xad, 0x73, 0x72, 0x42, 0x2c, 0xae, 0xb8, 0x42, + 0xc7, 0x5b, 0xe7, 0xa4, 0x6d, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x84, 0x56, 0xde, + 0x08, 0x99, 0x33, 0x1f, 0x31, 0x67, 0x07, 0xc0, 0xb5, 0x1d, 0xef, 0xc4, 0x76, 0xfa, 0xc4, 0x69, + 0xe4, 0xd6, 0xb5, 0x8d, 0xda, 0xd6, 0xdb, 0x9b, 0xea, 0x42, 0x6c, 0xaa, 0x06, 0x6d, 0x1e, 0xdb, + 0x8e, 
0xd7, 0xa1, 0xbc, 0x7a, 0xc9, 0x95, 0x8f, 0xe8, 0x23, 0x28, 0x33, 0x21, 0x9e, 0xe1, 0x9c, + 0x13, 0xaf, 0x91, 0x67, 0x52, 0x1e, 0xde, 0x20, 0xa5, 0xcb, 0x98, 0x75, 0xa6, 0x9e, 0x3f, 0x23, + 0x0c, 0x15, 0x97, 0x38, 0xa6, 0x31, 0x30, 0xbf, 0x34, 0x4e, 0x07, 0xa4, 0x51, 0x58, 0xd7, 0x36, + 0x8a, 0x7a, 0x88, 0x46, 0xe7, 0x7f, 0x49, 0xae, 0xdd, 0x13, 0xdb, 0x1a, 0x5c, 0x37, 0x8a, 0x8c, + 0xa1, 0x48, 0x09, 0x1d, 0x6b, 0x70, 0xcd, 0x16, 0xcd, 0x1e, 0x5b, 0x1e, 0xef, 0x2d, 0xb1, 0xde, + 0x12, 0xa3, 0xb0, 0xee, 0x0d, 0xa8, 0x0f, 0x4d, 0xeb, 0x64, 0x68, 0xf7, 0x4f, 0x7c, 0x87, 0x00, + 0x73, 0x48, 0x6d, 0x68, 0x5a, 0x2f, 0xec, 0xbe, 0x2e, 0xdd, 0x42, 0x39, 0x8d, 0xab, 0x30, 0x67, + 0x59, 0x70, 0x1a, 0x57, 0x2a, 0xe7, 0x26, 0x2c, 0x51, 0x99, 0x3d, 0x87, 0x18, 0x1e, 0x09, 0x98, + 0x2b, 0x8c, 0x79, 0x71, 0x68, 0x5a, 0x3b, 0xac, 0x27, 0xc4, 0x6f, 0x5c, 0xc5, 0xf8, 0xab, 0x82, + 0xdf, 0xb8, 0x0a, 0xf3, 0xe3, 0x4d, 0x28, 0xf9, 0x3e, 0x47, 0x45, 0x98, 0x3f, 0xec, 0x1c, 0xb6, + 0xeb, 0x73, 0x08, 0x20, 0xbf, 0x7d, 0xbc, 0xd3, 0x3e, 0xdc, 0xad, 0x6b, 0xa8, 0x0c, 0x85, 0xdd, + 0x36, 0x6f, 0x64, 0xf0, 0x53, 0x80, 0xc0, 0xbb, 0xa8, 0x00, 0xd9, 0xfd, 0xf6, 0xe7, 0xf5, 0x39, + 0xca, 0xf3, 0xaa, 0xad, 0x1f, 0xef, 0x75, 0x0e, 0xeb, 0x1a, 0x1d, 0xbc, 0xa3, 0xb7, 0xb7, 0xbb, + 0xed, 0x7a, 0x86, 0x72, 0xbc, 0xe8, 0xec, 0xd6, 0xb3, 0xa8, 0x04, 0xb9, 0x57, 0xdb, 0x07, 0x2f, + 0xdb, 0xf5, 0x79, 0xfc, 0x73, 0x0d, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xfc, 0x05, + 0x3b, 0x17, 0x6c, 0x2b, 0x96, 0xb7, 0xee, 0x46, 0x16, 0x37, 0x74, 0x76, 0x74, 0xc1, 0x8b, 0x30, + 0x64, 0x2f, 0x27, 0x6e, 0x23, 0xb3, 0x9e, 0xdd, 0x28, 0x6f, 0xd5, 0x37, 0xf9, 0x81, 0xdd, 0xdc, + 0x27, 0xd7, 0xaf, 0x8c, 0xc1, 0x98, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0x1f, 0xda, 0x0e, 0x61, 0x3b, + 0xb6, 0xa8, 0xb3, 0x67, 0xba, 0x8d, 0xd9, 0xa2, 0x89, 0xdd, 0xca, 0x1b, 0xf8, 0x17, 0x1a, 0xc0, + 0xd1, 0xd8, 0x4b, 0x3f, 0x1a, 0xcb, 0x90, 0x9b, 0x50, 0xc1, 0xe2, 0x58, 0xf0, 0x06, 0x3b, 0x13, + 0xc4, 0x70, 0x89, 0x7f, 0x26, 0x68, 0x03, 0xdd, 0x82, 0xc2, 0xc8, 0x21, 0x93, 0x93, 0xcb, 0x09, + 0x53, 0x52, 0xd4, 0xf3, 0xb4, 0xb9, 0x3f, 0x41, 0x6f, 0x41, 0xc5, 0x3c, 0xb7, 0x6c, 0x87, 0x9c, + 0x70, 0x59, 0x39, 0xd6, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x03, + 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x33, 0xb9, 0xef, 0xbd, 0xc0, 0xc6, 0x0c, 0x1b, 0x16, 0x77, + 0xa1, 0xb0, 0x1a, 0xff, 0x00, 0xd0, 0x2e, 0x19, 0x10, 0x8f, 0xcc, 0x12, 0x3d, 0x14, 0x9f, 0x64, + 0x55, 0x9f, 0xe0, 0x9f, 0x69, 0xb0, 0x14, 0x12, 0x3f, 0xd3, 0xb4, 0x1a, 0x50, 0xe8, 0x33, 0x61, + 0xdc, 0x82, 0xac, 0x2e, 0x9b, 0xe8, 0x31, 0x14, 0x85, 0x01, 0x6e, 0x23, 0x9b, 0xb2, 0x69, 0x0a, + 0xdc, 0x26, 0x17, 0xff, 0x22, 0x03, 0x25, 0x31, 0xd1, 0xce, 0x08, 0x6d, 0x43, 0xd5, 0xe1, 0x8d, + 0x13, 0x36, 0x1f, 0x61, 0x51, 0x33, 0x3d, 0x08, 0x3d, 0x9f, 0xd3, 0x2b, 0x62, 0x08, 0x23, 0xa3, + 0xdf, 0x83, 0xb2, 0x14, 0x31, 0x1a, 0x7b, 0xc2, 0xe5, 0x8d, 0xb0, 0x80, 0x60, 0xff, 0x3d, 0x9f, + 0xd3, 0x41, 0xb0, 0x1f, 0x8d, 0x3d, 0xd4, 0x85, 0x65, 0x39, 0x98, 0xcf, 0x46, 0x98, 0x91, 0x65, + 0x52, 0xd6, 0xc3, 0x52, 0xe2, 0x4b, 0xf5, 0x7c, 0x4e, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, + 0x5d, 0xf1, 0xe0, 0x1d, 0x33, 0xa9, 0x7b, 0x65, 0xc5, 0x4d, 0xea, 0x5e, 0x59, 0x4f, 0x4b, 0x50, + 0x10, 0x2d, 0xfc, 0x2f, 0x19, 0x00, 0xb9, 0x1a, 0x9d, 0x11, 0xda, 0x85, 0x9a, 0x23, 0x5a, 0x21, + 0x6f, 0xdd, 0x49, 0xf4, 0x96, 0x58, 0xc4, 0x39, 0xbd, 0x2a, 0x07, 0x71, 0xe3, 0xbe, 0x07, 0x15, + 0x5f, 0x4a, 0xe0, 0xb0, 0xdb, 0x09, 0x0e, 0xf3, 0x25, 0x94, 0xe5, 0x00, 0xea, 0xb2, 0x4f, 0x61, + 0xc5, 0x1f, 0x9f, 0xe0, 0xb3, 
0xb7, 0xa6, 0xf8, 0xcc, 0x17, 0xb8, 0x24, 0x25, 0xa8, 0x5e, 0x53, + 0x0d, 0x0b, 0xdc, 0x76, 0x3b, 0xc1, 0x6d, 0x71, 0xc3, 0xa8, 0xe3, 0x80, 0xe6, 0x4b, 0xde, 0xc4, + 0xff, 0x97, 0x85, 0xc2, 0x8e, 0x3d, 0x1c, 0x19, 0x0e, 0x5d, 0x8d, 0xbc, 0x43, 0xdc, 0xf1, 0xc0, + 0x63, 0xee, 0xaa, 0x6d, 0x3d, 0x08, 0x4b, 0x14, 0x6c, 0xf2, 0xbf, 0xce, 0x58, 0x75, 0x31, 0x84, + 0x0e, 0x16, 0xe9, 0x31, 0xf3, 0x06, 0x83, 0x45, 0x72, 0x14, 0x43, 0xe4, 0x41, 0xce, 0x06, 0x07, + 0xb9, 0x09, 0x85, 0x09, 0x71, 0x82, 0x94, 0xfe, 0x7c, 0x4e, 0x97, 0x04, 0xf4, 0x1e, 0x2c, 0x44, + 0xd3, 0x4b, 0x4e, 0xf0, 0xd4, 0x7a, 0xe1, 0x6c, 0xf4, 0x00, 0x2a, 0xa1, 0x1c, 0x97, 0x17, 0x7c, + 0xe5, 0xa1, 0x92, 0xe2, 0x56, 0x65, 0x5c, 0xa5, 0xf9, 0xb8, 0xf2, 0x7c, 0x4e, 0x46, 0xd6, 0x55, + 0x19, 0x59, 0x8b, 0x62, 0x94, 0x88, 0xad, 0xa1, 0x20, 0xf3, 0xfd, 0x70, 0x90, 0xc1, 0xdf, 0x87, + 0x6a, 0xc8, 0x41, 0x34, 0xef, 0xb4, 0x3f, 0x79, 0xb9, 0x7d, 0xc0, 0x93, 0xd4, 0x33, 0x96, 0x97, + 0xf4, 0xba, 0x46, 0x73, 0xdd, 0x41, 0xfb, 0xf8, 0xb8, 0x9e, 0x41, 0x55, 0x28, 0x1d, 0x76, 0xba, + 0x27, 0x9c, 0x2b, 0x8b, 0x9f, 0xf9, 0x12, 0x44, 0x92, 0x53, 0x72, 0xdb, 0x9c, 0x92, 0xdb, 0x34, + 0x99, 0xdb, 0x32, 0x41, 0x6e, 0x63, 0x69, 0xee, 0xa0, 0xbd, 0x7d, 0xdc, 0xae, 0xcf, 0x3f, 0xad, + 0x41, 0x85, 0xfb, 0xf7, 0x64, 0x6c, 0xd1, 0x54, 0xfb, 0x0f, 0x1a, 0x40, 0x70, 0x9a, 0x50, 0x0b, + 0x0a, 0x3d, 0xae, 0xa7, 0xa1, 0xb1, 0x60, 0xb4, 0x92, 0xb8, 0x64, 0xba, 0xe4, 0x42, 0xdf, 0x82, + 0x82, 0x3b, 0xee, 0xf5, 0x88, 0x2b, 0x53, 0xde, 0xad, 0x68, 0x3c, 0x14, 0xd1, 0x4a, 0x97, 0x7c, + 0x74, 0xc8, 0x99, 0x61, 0x0e, 0xc6, 0x2c, 0x01, 0x4e, 0x1f, 0x22, 0xf8, 0xf0, 0xdf, 0x69, 0x50, + 0x56, 0x36, 0xef, 0x6f, 0x18, 0x84, 0xef, 0x42, 0x89, 0xd9, 0x40, 0xfa, 0x22, 0x0c, 0x17, 0xf5, + 0x80, 0x80, 0x7e, 0x07, 0x4a, 0xf2, 0x04, 0xc8, 0x48, 0xdc, 0x48, 0x16, 0xdb, 0x19, 0xe9, 0x01, + 0x2b, 0xde, 0x87, 0x45, 0xe6, 0x95, 0x1e, 0x2d, 0xae, 0xa5, 0x1f, 0xd5, 0xf2, 0x53, 0x8b, 0x94, + 0x9f, 0x4d, 0x28, 0x8e, 0x2e, 0xae, 0x5d, 0xb3, 0x67, 0x0c, 0x84, 0x15, 0x7e, 0x1b, 0x7f, 0x0c, + 0x48, 0x15, 0x36, 0xcb, 0x74, 0x71, 0x15, 0xca, 0xcf, 0x0d, 0xf7, 0x42, 0x98, 0x84, 0x1f, 0x43, + 0x95, 0x36, 0xf7, 0x5f, 0xbd, 0x81, 0x8d, 0xec, 0xe5, 0x40, 0x72, 0xcf, 0xe4, 0x73, 0x04, 0xf3, + 0x17, 0x86, 0x7b, 0xc1, 0x26, 0x5a, 0xd5, 0xd9, 0x33, 0x7a, 0x0f, 0xea, 0x3d, 0x3e, 0xc9, 0x93, + 0xc8, 0x2b, 0xc3, 0x82, 0xa0, 0xfb, 0x95, 0xe0, 0x67, 0x50, 0xe1, 0x73, 0xf8, 0x6d, 0x1b, 0x81, + 0x17, 0x61, 0xe1, 0xd8, 0x32, 0x46, 0xee, 0x85, 0x2d, 0xb3, 0x1b, 0x9d, 0x74, 0x3d, 0xa0, 0xcd, + 0xa4, 0xf1, 0x5d, 0x58, 0x70, 0xc8, 0xd0, 0x30, 0x2d, 0xd3, 0x3a, 0x3f, 0x39, 0xbd, 0xf6, 0x88, + 0x2b, 0x5e, 0x98, 0x6a, 0x3e, 0xf9, 0x29, 0xa5, 0x52, 0xd3, 0x4e, 0x07, 0xf6, 0xa9, 0x08, 0x73, + 0xec, 0x19, 0xff, 0x34, 0x03, 0x95, 0x4f, 0x0d, 0xaf, 0x27, 0x97, 0x0e, 0xed, 0x41, 0xcd, 0x0f, + 0x6e, 0x8c, 0x22, 0x6c, 0x89, 0xa4, 0x58, 0x36, 0x46, 0x96, 0xd2, 0x32, 0x3b, 0x56, 0x7b, 0x2a, + 0x81, 0x89, 0x32, 0xac, 0x1e, 0x19, 0xf8, 0xa2, 0x32, 0xe9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, + 0xa0, 0x0e, 0xd4, 0x47, 0x8e, 0x7d, 0xee, 0x10, 0xd7, 0xf5, 0x85, 0xf1, 0x34, 0x86, 0x13, 0x84, + 0x1d, 0x09, 0xd6, 0x40, 0xdc, 0xc2, 0x28, 0x4c, 0x7a, 0xba, 0x10, 0xd4, 0x33, 0x3c, 0x38, 0xfd, + 0x57, 0x06, 0x50, 0x7c, 0x52, 0xbf, 0x6e, 0x89, 0xf7, 0x10, 0x6a, 0xae, 0x67, 0x38, 0xb1, 0xcd, + 0x56, 0x65, 0x54, 0x3f, 0xe2, 0xbf, 0x0b, 0xbe, 0x41, 0x27, 0x96, 0xed, 0x99, 0x67, 0xd7, 0xa2, + 0x4a, 0xae, 0x49, 0xf2, 0x21, 0xa3, 0xa2, 0x36, 0x14, 0xce, 0xcc, 0x81, 0x47, 0x1c, 0xb7, 0x91, + 0x5b, 0xcf, 0x6e, 0xd4, 0xb6, 0x1e, 0xdf, 0xb4, 0x0c, 
0x9b, 0x1f, 0x31, 0xfe, 0xee, 0xf5, 0x88, + 0xe8, 0x72, 0xac, 0x5a, 0x79, 0xe6, 0x43, 0xd5, 0xf8, 0x6d, 0x28, 0xbe, 0xa6, 0x22, 0xe8, 0x5b, + 0x76, 0x81, 0x17, 0x8b, 0xac, 0xcd, 0x5f, 0xb2, 0xcf, 0x1c, 0xe3, 0x7c, 0x48, 0x2c, 0x4f, 0xbe, + 0x07, 0xca, 0x36, 0x7e, 0x08, 0x10, 0xa8, 0xa1, 0x21, 0xff, 0xb0, 0x73, 0xf4, 0xb2, 0x5b, 0x9f, + 0x43, 0x15, 0x28, 0x1e, 0x76, 0x76, 0xdb, 0x07, 0x6d, 0x9a, 0x1f, 0x70, 0x4b, 0xba, 0x34, 0xb4, + 0x96, 0xaa, 0x4e, 0x2d, 0xa4, 0x13, 0xaf, 0xc2, 0x72, 0xd2, 0x02, 0xd2, 0x5a, 0xb4, 0x2a, 0x76, + 0xe9, 0x4c, 0x47, 0x45, 0x55, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0xa2, 0x38, + 0x97, 0x4d, 0xea, 0x08, 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x0c, 0x2f, 0xb9, 0xc4, + 0xf0, 0x82, 0x1e, 0x40, 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x6a, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, + 0xd2, 0x42, 0x4e, 0x2f, 0x84, 0x9d, 0x8e, 0x1e, 0x42, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, + 0xcb, 0x18, 0x55, 0x59, 0xbb, 0xb7, 0x29, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xde, 0x91, + 0x9e, 0x39, 0x86, 0xa5, 0xbe, 0xcc, 0x75, 0xbb, 0x07, 0xc2, 0xdd, 0xf4, 0x11, 0xd5, 0x20, 0xb3, + 0xb7, 0x2b, 0x9c, 0x90, 0xd9, 0xdb, 0xc5, 0x3f, 0xd6, 0x00, 0xa9, 0xe3, 0x66, 0xf2, 0x73, 0x44, + 0xb8, 0x54, 0x9f, 0x0d, 0xd4, 0x2f, 0x43, 0x8e, 0x38, 0x8e, 0xed, 0x30, 0x8f, 0x96, 0x74, 0xde, + 0xc0, 0x6f, 0x0b, 0x1b, 0x74, 0x32, 0xb1, 0x2f, 0xfd, 0x33, 0xc8, 0xa5, 0x69, 0xbe, 0xa9, 0xfb, + 0xb0, 0x14, 0xe2, 0x9a, 0x29, 0x73, 0x7d, 0x04, 0x0b, 0x4c, 0xd8, 0xce, 0x05, 0xe9, 0x5d, 0x8e, + 0x6c, 0xd3, 0x8a, 0xe9, 0xa3, 0x2b, 0x17, 0x04, 0x58, 0x3a, 0x0f, 0x3e, 0xb1, 0x8a, 0x4f, 0xec, + 0x76, 0x0f, 0xf0, 0xe7, 0xb0, 0x1a, 0x91, 0x23, 0xcd, 0xff, 0x43, 0x28, 0xf7, 0x7c, 0xa2, 0x2b, + 0x6a, 0x9d, 0x7b, 0x61, 0xe3, 0xa2, 0x43, 0xd5, 0x11, 0xb8, 0x03, 0xb7, 0x62, 0xa2, 0x67, 0x9a, + 0xf3, 0xbb, 0xb0, 0xc2, 0x04, 0xee, 0x13, 0x32, 0xda, 0x1e, 0x98, 0x93, 0x54, 0x4f, 0x8f, 0xc4, + 0xa4, 0x14, 0xc6, 0xaf, 0x77, 0x5f, 0xe0, 0xdf, 0x17, 0x1a, 0xbb, 0xe6, 0x90, 0x74, 0xed, 0x83, + 0x74, 0xdb, 0x68, 0x36, 0xbb, 0x24, 0xd7, 0xae, 0x28, 0x6b, 0xd8, 0x33, 0xfe, 0x47, 0x4d, 0xb8, + 0x4a, 0x1d, 0xfe, 0x35, 0xef, 0xe4, 0x35, 0x80, 0x73, 0x7a, 0x64, 0x48, 0x9f, 0x76, 0x70, 0x44, + 0x45, 0xa1, 0xf8, 0x76, 0xd2, 0xf8, 0x5d, 0x11, 0x76, 0x2e, 0x8b, 0x7d, 0xce, 0xfe, 0xf8, 0x51, + 0xee, 0x1e, 0x94, 0x19, 0xe1, 0xd8, 0x33, 0xbc, 0xb1, 0x1b, 0x5b, 0x8c, 0x3f, 0x17, 0xdb, 0x5e, + 0x0e, 0x9a, 0x69, 0x5e, 0xdf, 0x82, 0x3c, 0x7b, 0x99, 0x90, 0xa5, 0xf4, 0xed, 0x84, 0xfd, 0xc8, + 0xed, 0xd0, 0x05, 0x23, 0xfe, 0xa9, 0x06, 0xf9, 0x17, 0x0c, 0x82, 0x55, 0x4c, 0x9b, 0x97, 0x6b, + 0x61, 0x19, 0x43, 0x0e, 0x0c, 0x95, 0x74, 0xf6, 0xcc, 0x4a, 0x4f, 0x42, 0x9c, 0x97, 0xfa, 0x01, + 0x2f, 0x71, 0x4b, 0xba, 0xdf, 0xa6, 0x3e, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, + 0x2a, 0x14, 0x5a, 0x3d, 0x9b, 0xee, 0x01, 0x31, 0x1c, 0x4b, 0x80, 0xa6, 0x45, 0x3d, 0x20, 0xe0, + 0x03, 0xa8, 0x73, 0x3b, 0xb6, 0xfb, 0x7d, 0xa5, 0xc0, 0xf4, 0xb5, 0x69, 0x11, 0x6d, 0x21, 0x69, + 0x99, 0xa8, 0xb4, 0x7f, 0xd2, 0x60, 0x51, 0x11, 0x37, 0x93, 0x57, 0xdf, 0x87, 0x3c, 0x07, 0xa9, + 0x45, 0xa5, 0xb3, 0x1c, 0x1e, 0xc5, 0xd5, 0xe8, 0x82, 0x07, 0x6d, 0x42, 0x81, 0x3f, 0xc9, 0x77, + 0x80, 0x64, 0x76, 0xc9, 0x84, 0x1f, 0xc2, 0x92, 0x20, 0x91, 0xa1, 0x9d, 0x74, 0x30, 0xd8, 0x62, + 0xe0, 0x3f, 0x85, 0xe5, 0x30, 0xdb, 0x4c, 0x53, 0x52, 0x8c, 0xcc, 0xbc, 0x89, 0x91, 0xdb, 0xd2, + 0xc8, 0x97, 0xa3, 0xbe, 0x52, 0x47, 0x45, 0x77, 0x8c, 0xba, 0x5e, 0x99, 0xf0, 0x7a, 0x05, 0x13, + 0x90, 0x22, 0xbe, 0xd1, 0x09, 0x2c, 0xc9, 0xed, 0x70, 0x60, 0xba, 0x7e, 0xb9, 
0xfe, 0x25, 0x20, + 0x95, 0xf8, 0x8d, 0x1a, 0xf4, 0x8e, 0x74, 0xc7, 0x91, 0x63, 0x0f, 0xed, 0x54, 0x97, 0xe2, 0x3f, + 0x83, 0x95, 0x08, 0xdf, 0x37, 0xed, 0xb7, 0x5d, 0x22, 0x8b, 0x15, 0xe9, 0xb7, 0x8f, 0x01, 0xa9, + 0xc4, 0x99, 0xb2, 0x56, 0x0b, 0x16, 0x5f, 0xd8, 0x13, 0x1a, 0xfe, 0x28, 0x35, 0x38, 0xf7, 0x1c, + 0x63, 0xf0, 0x5d, 0xe1, 0xb7, 0xa9, 0x72, 0x75, 0xc0, 0x4c, 0xca, 0xff, 0x43, 0x83, 0xca, 0xf6, + 0xc0, 0x70, 0x86, 0x52, 0xf1, 0xf7, 0x20, 0xcf, 0xdf, 0x9c, 0x05, 0x58, 0xf5, 0x4e, 0x58, 0x8c, + 0xca, 0xcb, 0x1b, 0xdb, 0xfc, 0x3d, 0x5b, 0x8c, 0xa2, 0x86, 0x8b, 0xef, 0x59, 0xbb, 0x91, 0xef, + 0x5b, 0xbb, 0xe8, 0x03, 0xc8, 0x19, 0x74, 0x08, 0x4b, 0x33, 0xb5, 0x28, 0x66, 0xc1, 0xa4, 0xb1, + 0xfa, 0x9e, 0x73, 0xe1, 0xef, 0x40, 0x59, 0xd1, 0x80, 0x0a, 0x90, 0x7d, 0xd6, 0x16, 0xc5, 0xf8, + 0xf6, 0x4e, 0x77, 0xef, 0x15, 0x07, 0x6b, 0x6a, 0x00, 0xbb, 0x6d, 0xbf, 0x9d, 0xc1, 0x9f, 0x89, + 0x51, 0x22, 0xa4, 0xab, 0xf6, 0x68, 0x69, 0xf6, 0x64, 0xde, 0xc8, 0x9e, 0x2b, 0xa8, 0x8a, 0xe9, + 0xcf, 0x9a, 0xa2, 0x98, 0xbc, 0x94, 0x14, 0xa5, 0x18, 0xaf, 0x0b, 0x46, 0xbc, 0x00, 0x55, 0x91, + 0xb4, 0xc4, 0xfe, 0xfb, 0xf7, 0x0c, 0xd4, 0x24, 0x65, 0x56, 0x50, 0x5d, 0xe2, 0x81, 0x3c, 0xc9, + 0xf9, 0x68, 0xe0, 0x2a, 0xe4, 0xfb, 0xa7, 0xc7, 0xe6, 0x97, 0xf2, 0x03, 0x88, 0x68, 0x51, 0xfa, + 0x80, 0xeb, 0xe1, 0x5f, 0x21, 0x45, 0x8b, 0x66, 0x23, 0xc7, 0x38, 0xf3, 0xf6, 0xac, 0x3e, 0xb9, + 0x62, 0xb9, 0x6d, 0x5e, 0x0f, 0x08, 0x0c, 0x28, 0x11, 0x5f, 0x2b, 0xd9, 0x0b, 0x82, 0xf2, 0xf5, + 0x12, 0x3d, 0x82, 0x3a, 0x7d, 0xde, 0x1e, 0x8d, 0x06, 0x26, 0xe9, 0x73, 0x01, 0x05, 0xc6, 0x13, + 0xa3, 0x53, 0xed, 0xac, 0xa4, 0x76, 0x1b, 0x45, 0x16, 0x5d, 0x45, 0x0b, 0xad, 0x43, 0x99, 0xdb, + 0xb7, 0x67, 0xbd, 0x74, 0x09, 0xfb, 0x84, 0x97, 0xd5, 0x55, 0x52, 0x38, 0x5b, 0x42, 0x34, 0x5b, + 0x2e, 0xc1, 0xe2, 0xf6, 0xd8, 0xbb, 0x68, 0x5b, 0xc6, 0xe9, 0x40, 0x46, 0x22, 0x5a, 0xce, 0x50, + 0xe2, 0xae, 0xe9, 0xaa, 0xd4, 0x36, 0x2c, 0x51, 0x2a, 0xb1, 0x3c, 0xb3, 0xa7, 0x64, 0x02, 0x59, + 0x2b, 0x68, 0x91, 0x5a, 0xc1, 0x70, 0xdd, 0xd7, 0xb6, 0xd3, 0x17, 0xee, 0xf5, 0xdb, 0x78, 0xc2, + 0x85, 0xbf, 0x74, 0x43, 0xf9, 0xfe, 0xd7, 0x94, 0x82, 0x3e, 0x84, 0x82, 0x3d, 0x62, 0x9f, 0xa4, + 0x05, 0x6e, 0xb0, 0xba, 0xc9, 0x3f, 0x62, 0x6f, 0x0a, 0xc1, 0x1d, 0xde, 0xab, 0x4b, 0x36, 0xbc, + 0x11, 0xe8, 0x7d, 0x46, 0xbc, 0x29, 0x7a, 0xf1, 0x63, 0x58, 0x91, 0x9c, 0x02, 0x26, 0x9f, 0xc2, + 0xdc, 0x81, 0x7b, 0x92, 0x79, 0xe7, 0xc2, 0xb0, 0xce, 0xc9, 0x91, 0x30, 0xf1, 0x37, 0xf5, 0xcf, + 0x53, 0x68, 0xf8, 0x76, 0xb2, 0x57, 0x37, 0x7b, 0xa0, 0x1a, 0x30, 0x76, 0xc5, 0x4e, 0x2f, 0xe9, + 0xec, 0x99, 0xd2, 0x1c, 0x7b, 0xe0, 0xd7, 0x6a, 0xf4, 0x19, 0xef, 0xc0, 0x6d, 0x29, 0x43, 0xbc, + 0x54, 0x85, 0x85, 0xc4, 0x0c, 0x4a, 0x12, 0x22, 0x1c, 0x46, 0x87, 0x4e, 0x5f, 0x28, 0x95, 0x33, + 0xec, 0x5a, 0x26, 0x53, 0x53, 0x64, 0xae, 0xf0, 0x3d, 0x44, 0x0d, 0x53, 0xd3, 0xb1, 0x20, 0x53, + 0x01, 0x2a, 0x59, 0x2c, 0x04, 0x25, 0xc7, 0x16, 0x22, 0x26, 0xfa, 0x07, 0xb0, 0xe6, 0x1b, 0x41, + 0xfd, 0x76, 0x44, 0x9c, 0xa1, 0xe9, 0xba, 0x0a, 0xb0, 0x9a, 0x34, 0xf1, 0x77, 0x60, 0x7e, 0x44, + 0x44, 0x24, 0x2c, 0x6f, 0x21, 0xb9, 0x89, 0x94, 0xc1, 0xac, 0x1f, 0xf7, 0xe1, 0xbe, 0x94, 0xce, + 0x3d, 0x9a, 0x28, 0x3e, 0x6a, 0x94, 0x84, 0x9b, 0x32, 0x29, 0x70, 0x53, 0x36, 0x02, 0xf6, 0x7f, + 0xcc, 0x1d, 0x29, 0x4f, 0xe3, 0x4c, 0x19, 0x6e, 0x9f, 0xfb, 0xd4, 0x3f, 0xc4, 0x33, 0x09, 0x3b, + 0x85, 0xe5, 0xf0, 0xd9, 0x9f, 0x29, 0xf8, 0x2e, 0x43, 0xce, 0xb3, 0x2f, 0x89, 0x0c, 0xbd, 0xbc, + 0x21, 0x0d, 0xf6, 0x03, 0xc3, 0x4c, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x67, 0xb5, 0x97, 0xae, + 0xa6, 
0xac, 0x6c, 0x79, 0x03, 0x1f, 0xc2, 0x6a, 0x34, 0x4c, 0xcc, 0x64, 0xf2, 0x2b, 0xbe, 0x81, + 0x93, 0x22, 0xc9, 0x4c, 0x72, 0x3f, 0x09, 0x82, 0x81, 0x12, 0x50, 0x66, 0x12, 0xa9, 0x43, 0x33, + 0x29, 0xbe, 0xfc, 0x36, 0xf6, 0xab, 0x1f, 0x6e, 0x66, 0x12, 0xe6, 0x06, 0xc2, 0x66, 0x5f, 0xfe, + 0x20, 0x46, 0x64, 0xa7, 0xc6, 0x08, 0x71, 0x48, 0x82, 0x28, 0xf6, 0x35, 0x6c, 0x3a, 0xa1, 0x23, + 0x08, 0xa0, 0xb3, 0xea, 0xa0, 0x39, 0xc4, 0xd7, 0xc1, 0x1a, 0x72, 0x63, 0xab, 0x61, 0x77, 0xa6, + 0xc5, 0xf8, 0x34, 0x88, 0x9d, 0xb1, 0xc8, 0x3c, 0x93, 0xe0, 0xcf, 0x60, 0x3d, 0x3d, 0x28, 0xcf, + 0x22, 0xf9, 0x51, 0x0b, 0x4a, 0x7e, 0x19, 0xac, 0xdc, 0x22, 0x2a, 0x43, 0xe1, 0xb0, 0x73, 0x7c, + 0xb4, 0xbd, 0xd3, 0xe6, 0xd7, 0x88, 0x76, 0x3a, 0xba, 0xfe, 0xf2, 0xa8, 0x5b, 0xcf, 0x6c, 0xfd, + 0x32, 0x0b, 0x99, 0xfd, 0x57, 0xe8, 0x73, 0xc8, 0xf1, 0x6f, 0xea, 0x53, 0x2e, 0x52, 0x34, 0xa7, + 0x5d, 0x1b, 0xc0, 0xb7, 0x7e, 0xfc, 0xdf, 0xbf, 0xfc, 0x79, 0x66, 0x11, 0x57, 0x5a, 0x93, 0x6f, + 0xb7, 0x2e, 0x27, 0x2d, 0x96, 0x1b, 0x9e, 0x68, 0x8f, 0xd0, 0x27, 0x90, 0x3d, 0x1a, 0x7b, 0x28, + 0xf5, 0x82, 0x45, 0x33, 0xfd, 0x26, 0x01, 0x5e, 0x61, 0x42, 0x17, 0x30, 0x08, 0xa1, 0xa3, 0xb1, + 0x47, 0x45, 0xfe, 0x10, 0xca, 0xea, 0x3d, 0x80, 0x1b, 0x6f, 0x5d, 0x34, 0x6f, 0xbe, 0x63, 0x80, + 0xef, 0x31, 0x55, 0xb7, 0x30, 0x12, 0xaa, 0xf8, 0x4d, 0x05, 0x75, 0x16, 0xdd, 0x2b, 0x0b, 0xa5, + 0xde, 0xc9, 0x68, 0xa6, 0x5f, 0x3b, 0x88, 0xcd, 0xc2, 0xbb, 0xb2, 0xa8, 0xc8, 0x3f, 0x16, 0x37, + 0x0e, 0x7a, 0x1e, 0xba, 0x9f, 0xf0, 0xc5, 0x59, 0xfd, 0xb6, 0xda, 0x5c, 0x4f, 0x67, 0x10, 0x4a, + 0xee, 0x32, 0x25, 0xab, 0x78, 0x51, 0x28, 0xe9, 0xf9, 0x2c, 0x4f, 0xb4, 0x47, 0x5b, 0x3d, 0xc8, + 0xb1, 0xef, 0x16, 0xe8, 0x0b, 0xf9, 0xd0, 0x4c, 0xf8, 0x80, 0x93, 0xb2, 0xd0, 0xa1, 0x2f, 0x1e, + 0x78, 0x99, 0x29, 0xaa, 0xe1, 0x12, 0x55, 0xc4, 0xbe, 0x5a, 0x3c, 0xd1, 0x1e, 0x6d, 0x68, 0x1f, + 0x6a, 0x5b, 0xff, 0x9c, 0x83, 0x1c, 0x03, 0xec, 0xd0, 0x25, 0x40, 0x80, 0xe1, 0x47, 0x67, 0x17, + 0xfb, 0x2a, 0x10, 0x9d, 0x5d, 0x1c, 0xfe, 0xc7, 0x4d, 0xa6, 0x74, 0x19, 0x2f, 0x50, 0xa5, 0x0c, + 0x07, 0x6c, 0x31, 0x68, 0x93, 0xfa, 0xf1, 0xaf, 0x34, 0x81, 0x57, 0xf2, 0xb3, 0x84, 0x92, 0xa4, + 0x85, 0x80, 0xfc, 0xe8, 0x76, 0x48, 0x00, 0xf1, 0xf1, 0x77, 0x99, 0xc2, 0x16, 0xae, 0x07, 0x0a, + 0x1d, 0xc6, 0xf1, 0x44, 0x7b, 0xf4, 0x45, 0x03, 0x2f, 0x09, 0x2f, 0x47, 0x7a, 0xd0, 0x8f, 0xa0, + 0x16, 0x06, 0xaa, 0xd1, 0x83, 0x04, 0x5d, 0x51, 0xbc, 0xbb, 0xf9, 0xf6, 0x74, 0x26, 0x61, 0xd3, + 0x1a, 0xb3, 0x49, 0x28, 0xe7, 0x9a, 0x2f, 0x09, 0x19, 0x19, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x7b, + 0x4d, 0x7c, 0x47, 0x08, 0x90, 0x67, 0x94, 0x24, 0x3d, 0x86, 0x6b, 0x37, 0x1f, 0xde, 0xc0, 0x25, + 0x8c, 0xf8, 0x03, 0x66, 0xc4, 0xef, 0xe2, 0xe5, 0xc0, 0x08, 0xcf, 0x1c, 0x12, 0xcf, 0x16, 0x56, + 0x7c, 0x71, 0x17, 0xdf, 0x0a, 0x39, 0x27, 0xd4, 0x1b, 0x2c, 0x16, 0x47, 0x8f, 0x13, 0x17, 0x2b, + 0x84, 0x46, 0x27, 0x2e, 0x56, 0x18, 0x7a, 0x4e, 0x5a, 0x2c, 0x8e, 0x15, 0x27, 0x2d, 0x96, 0xdf, + 0xb3, 0xf5, 0xff, 0xf3, 0x50, 0xd8, 0xe1, 0x37, 0x7d, 0x91, 0x0d, 0x25, 0x1f, 0x7c, 0x45, 0x6b, + 0x49, 0x08, 0x53, 0xf0, 0x2e, 0xd1, 0xbc, 0x9f, 0xda, 0x2f, 0x0c, 0x7a, 0x8b, 0x19, 0x74, 0x07, + 0xaf, 0x52, 0xcd, 0xe2, 0x32, 0x71, 0x8b, 0xc3, 0x18, 0x2d, 0xa3, 0xdf, 0xa7, 0x8e, 0xf8, 0x13, + 0xa8, 0xa8, 0xe8, 0x28, 0x7a, 0x2b, 0x11, 0xd5, 0x52, 0x01, 0xd6, 0x26, 0x9e, 0xc6, 0x22, 0x34, + 0xbf, 0xcd, 0x34, 0xaf, 0xe1, 0xdb, 0x09, 0x9a, 0x1d, 0xc6, 0x1a, 0x52, 0xce, 0x91, 0xcd, 0x64, + 0xe5, 0x21, 0xe0, 0x34, 0x59, 0x79, 0x18, 0x18, 0x9d, 0xaa, 0x7c, 0xcc, 0x58, 0xa9, 0x72, 0x17, + 0x20, 0xc0, 0x30, 0x51, 0xa2, 
0x2f, 0x95, 0x97, 0xa9, 0x68, 0x70, 0x88, 0xc3, 0x9f, 0x18, 0x33, + 0xb5, 0x62, 0xdf, 0x45, 0xd4, 0x0e, 0x4c, 0xd7, 0xe3, 0x07, 0xb3, 0x1a, 0x02, 0x25, 0x51, 0xe2, + 0x7c, 0xc2, 0xc8, 0x66, 0xf3, 0xc1, 0x54, 0x1e, 0xa1, 0xfd, 0x21, 0xd3, 0x7e, 0x1f, 0x37, 0x13, + 0xb4, 0x8f, 0x38, 0x2f, 0xdd, 0x6c, 0x7f, 0x9d, 0x87, 0xf2, 0x0b, 0xc3, 0xb4, 0x3c, 0x62, 0x19, + 0x56, 0x8f, 0xa0, 0x53, 0xc8, 0xb1, 0x4c, 0x1d, 0x0d, 0xc4, 0x2a, 0x60, 0x17, 0x0d, 0xc4, 0x21, + 0x34, 0x0b, 0xaf, 0x33, 0xc5, 0x4d, 0xbc, 0x42, 0x15, 0x0f, 0x03, 0xd1, 0x2d, 0x06, 0x42, 0xd1, + 0x49, 0x9f, 0x41, 0x5e, 0x7c, 0xc3, 0x89, 0x08, 0x0a, 0x81, 0x53, 0xcd, 0xbb, 0xc9, 0x9d, 0x49, + 0x7b, 0x59, 0x55, 0xe3, 0x32, 0x3e, 0xaa, 0x67, 0x02, 0x10, 0xa0, 0xab, 0xd1, 0x15, 0x8d, 0x81, + 0xb1, 0xcd, 0xf5, 0x74, 0x86, 0x24, 0x9f, 0xaa, 0x3a, 0xfb, 0x3e, 0x2f, 0xd5, 0xfb, 0x47, 0x30, + 0xff, 0xdc, 0x70, 0x2f, 0x50, 0x24, 0xf7, 0x2a, 0x37, 0x80, 0x9a, 0xcd, 0xa4, 0x2e, 0xa1, 0xe5, + 0x3e, 0xd3, 0x72, 0x9b, 0x87, 0x32, 0x55, 0xcb, 0x85, 0xe1, 0xd2, 0xa4, 0x86, 0xfa, 0x90, 0xe7, + 0x17, 0x82, 0xa2, 0xfe, 0x0b, 0x5d, 0x2a, 0x8a, 0xfa, 0x2f, 0x7c, 0x87, 0xe8, 0x66, 0x2d, 0x23, + 0x28, 0xca, 0x1b, 0x38, 0x28, 0xf2, 0x39, 0x36, 0x72, 0x5b, 0xa7, 0xb9, 0x96, 0xd6, 0x2d, 0x74, + 0x3d, 0x60, 0xba, 0xee, 0xe1, 0x46, 0x6c, 0xad, 0x04, 0xe7, 0x13, 0xed, 0xd1, 0x87, 0x1a, 0xfa, + 0x11, 0x40, 0x00, 0x48, 0xc7, 0x4e, 0x60, 0x14, 0xdb, 0x8e, 0x9d, 0xc0, 0x18, 0x96, 0x8d, 0x37, + 0x99, 0xde, 0x0d, 0xfc, 0x20, 0xaa, 0xd7, 0x73, 0x0c, 0xcb, 0x3d, 0x23, 0xce, 0x07, 0x1c, 0x74, + 0x74, 0x2f, 0xcc, 0x11, 0x3d, 0x0c, 0xff, 0xba, 0x00, 0xf3, 0xb4, 0x02, 0xa6, 0x85, 0x42, 0x00, + 0x1c, 0x44, 0x2d, 0x89, 0x01, 0x7c, 0x51, 0x4b, 0xe2, 0x98, 0x43, 0xb8, 0x50, 0x60, 0xbf, 0x11, + 0x21, 0x8c, 0x81, 0x3a, 0xda, 0x86, 0xb2, 0x82, 0x2c, 0xa0, 0x04, 0x61, 0x61, 0xe4, 0x30, 0x9a, + 0x7a, 0x12, 0x60, 0x09, 0x7c, 0x87, 0xe9, 0x5b, 0xe1, 0xa9, 0x87, 0xe9, 0xeb, 0x73, 0x0e, 0xaa, + 0xf0, 0x35, 0x54, 0x54, 0xf4, 0x01, 0x25, 0xc8, 0x8b, 0xa0, 0x92, 0xd1, 0x30, 0x9b, 0x04, 0x5e, + 0x84, 0x0f, 0xbe, 0xff, 0x3b, 0x18, 0xc9, 0x46, 0x15, 0x0f, 0xa0, 0x20, 0xe0, 0x88, 0xa4, 0x59, + 0x86, 0x21, 0xcc, 0xa4, 0x59, 0x46, 0xb0, 0x8c, 0x70, 0x71, 0xc9, 0x34, 0xd2, 0x37, 0x2e, 0x99, + 0xca, 0x84, 0xb6, 0x67, 0xc4, 0x4b, 0xd3, 0x16, 0xa0, 0x6b, 0x69, 0xda, 0x94, 0xb7, 0xdd, 0x34, + 0x6d, 0xe7, 0xc4, 0x13, 0xc7, 0x45, 0xbe, 0x45, 0xa2, 0x14, 0x61, 0x6a, 0xfa, 0xc0, 0xd3, 0x58, + 0x92, 0x6a, 0xff, 0x40, 0xa1, 0xcc, 0x1d, 0x57, 0x00, 0x01, 0x58, 0x12, 0x2d, 0xe8, 0x12, 0x11, + 0xd7, 0x68, 0x41, 0x97, 0x8c, 0xb7, 0x84, 0x43, 0x43, 0xa0, 0x97, 0xbf, 0x7a, 0x50, 0xcd, 0x3f, + 0xd3, 0x00, 0xc5, 0x71, 0x15, 0xf4, 0x38, 0x59, 0x7a, 0x22, 0x8e, 0xdb, 0x7c, 0xff, 0xcd, 0x98, + 0x93, 0xa2, 0x7d, 0x60, 0x52, 0x8f, 0x71, 0x8f, 0x5e, 0x53, 0xa3, 0xfe, 0x42, 0x83, 0x6a, 0x08, + 0x94, 0x41, 0xef, 0xa4, 0xac, 0x69, 0x04, 0x06, 0x6e, 0xbe, 0x7b, 0x23, 0x5f, 0x52, 0xa5, 0xab, + 0xec, 0x00, 0x59, 0xf2, 0xff, 0x44, 0x83, 0x5a, 0x18, 0xc4, 0x41, 0x29, 0xb2, 0x63, 0x30, 0x72, + 0x73, 0xe3, 0x66, 0xc6, 0xe9, 0xcb, 0x13, 0x54, 0xfb, 0x03, 0x28, 0x08, 0xd8, 0x27, 0x69, 0xe3, + 0x87, 0x01, 0xe8, 0xa4, 0x8d, 0x1f, 0xc1, 0x8c, 0x12, 0x36, 0xbe, 0x63, 0x0f, 0x88, 0x72, 0xcc, + 0x04, 0x2e, 0x94, 0xa6, 0x6d, 0xfa, 0x31, 0x8b, 0x80, 0x4a, 0x69, 0xda, 0x82, 0x63, 0x26, 0x01, + 0x21, 0x94, 0x22, 0xec, 0x86, 0x63, 0x16, 0xc5, 0x93, 0x12, 0x8e, 0x19, 0x53, 0xa8, 0x1c, 0xb3, + 0x00, 0xba, 0x49, 0x3a, 0x66, 0x31, 0x3c, 0x3d, 0xe9, 0x98, 0xc5, 0xd1, 0x9f, 0x84, 0x75, 0x64, + 0x7a, 0x43, 0xc7, 0x6c, 0x29, 0x01, 0xe5, 0x41, 0xef, 
0xa7, 0x38, 0x31, 0x11, 0xa6, 0x6f, 0x7e, + 0xf0, 0x86, 0xdc, 0xa9, 0x7b, 0x9c, 0xbb, 0x5f, 0xee, 0xf1, 0xbf, 0xd1, 0x60, 0x39, 0x09, 0x21, + 0x42, 0x29, 0x7a, 0x52, 0xe0, 0xfd, 0xe6, 0xe6, 0x9b, 0xb2, 0x4f, 0xf7, 0x96, 0xbf, 0xeb, 0x9f, + 0xd6, 0xff, 0xed, 0xab, 0x35, 0xed, 0x3f, 0xbf, 0x5a, 0xd3, 0xfe, 0xe7, 0xab, 0x35, 0xed, 0x6f, + 0xff, 0x77, 0x6d, 0xee, 0x34, 0xcf, 0x7e, 0x5d, 0xf9, 0xed, 0x5f, 0x05, 0x00, 0x00, 0xff, 0xff, + 0x52, 0x4e, 0xd7, 0x33, 0xe4, 0x39, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto new file mode 100644 index 000000000..423eabada --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto @@ -0,0 +1,1146 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; +import "etcd/auth/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. 
+ rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3/lease/revoke" + body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. + rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3/lease/timetolive" + body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } + }; + } + + // LeaseLeases lists all existing leases. + rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { + option (google.api.http) = { + post: "/v3/lease/leases" + body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } + }; + } +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. + rpc MemberList(MemberListRequest) returns (MemberListResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/list" + body: "*" + }; + } + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/promote" + body: "*" + }; + } +} + +service Maintenance { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + rpc Alarm(AlarmRequest) returns (AlarmResponse) { + option (google.api.http) = { + post: "/v3/maintenance/alarm" + body: "*" + }; + } + + // Status gets the status of the member. + rpc Status(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + post: "/v3/maintenance/status" + body: "*" + }; + } + + // Defragment defragments a member's backend database to recover storage space. + rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { + option (google.api.http) = { + post: "/v3/maintenance/defragment" + body: "*" + }; + } + + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. 
+ rpc Hash(HashRequest) returns (HashResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + rpc HashKV(HashKVRequest) returns (HashKVResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { + option (google.api.http) = { + post: "/v3/maintenance/snapshot" + body: "*" + }; + } + + // MoveLeader requests current leader node to transfer its leadership to transferee. + rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { + option (google.api.http) = { + post: "/v3/maintenance/transfer-leadership" + body: "*" + }; + } +} + +service Auth { + // AuthEnable enables authentication. + rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { + option (google.api.http) = { + post: "/v3/auth/enable" + body: "*" + }; + } + + // AuthDisable disables authentication. + rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { + option (google.api.http) = { + post: "/v3/auth/disable" + body: "*" + }; + } + + // Authenticate processes an authenticate request. + rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { + option (google.api.http) = { + post: "/v3/auth/authenticate" + body: "*" + }; + } + + // UserAdd adds a new user. User name cannot be empty. + rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { + option (google.api.http) = { + post: "/v3/auth/user/add" + body: "*" + }; + } + + // UserGet gets detailed user information. + rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { + option (google.api.http) = { + post: "/v3/auth/user/get" + body: "*" + }; + } + + // UserList gets a list of all users. + rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { + option (google.api.http) = { + post: "/v3/auth/user/list" + body: "*" + }; + } + + // UserDelete deletes a specified user. + rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/user/delete" + body: "*" + }; + } + + // UserChangePassword changes the password of a specified user. + rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { + option (google.api.http) = { + post: "/v3/auth/user/changepw" + body: "*" + }; + } + + // UserGrant grants a role to a specified user. + rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/grant" + body: "*" + }; + } + + // UserRevokeRole revokes a role of specified user. + rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/revoke" + body: "*" + }; + } + + // RoleAdd adds a new role. Role name cannot be empty. + rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { + option (google.api.http) = { + post: "/v3/auth/role/add" + body: "*" + }; + } + + // RoleGet gets detailed role information. + rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { + option (google.api.http) = { + post: "/v3/auth/role/get" + body: "*" + }; + } + + // RoleList gets lists of all roles. 
+ rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { + option (google.api.http) = { + post: "/v3/auth/role/list" + body: "*" + }; + } + + // RoleDelete deletes a specified role. + rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/role/delete" + body: "*" + }; + } + + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/grant" + body: "*" + }; + } + + // RoleRevokePermission revokes a key or range permission of a specified role. + rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/revoke" + body: "*" + }; + } +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. + uint64 cluster_id = 1; + // member_id is the ID of the member which sent the response. + uint64 member_id = 2; + // revision is the key-value store revision when the request was applied. + // For watch progress responses, the header.revision indicates progress. All future events + // recieved in this stream are guaranteed to have a higher revision number than the + // header.revision number. + int64 revision = 3; + // raft_term is the raft term when the request was applied. + uint64 raft_term = 4; +} + +message RangeRequest { + enum SortOrder { + NONE = 0; // default, no sorting + ASCEND = 1; // lowest target value first + DESCEND = 2; // highest target value first + } + enum SortTarget { + KEY = 0; + VERSION = 1; + CREATE = 2; + MOD = 3; + VALUE = 4; + } + + // key is the first key for the range. If range_end is not given, the request only looks up key. + bytes key = 1; + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + bytes range_end = 2; + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + int64 limit = 3; + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + int64 revision = 4; + + // sort_order is the order for returned sorted results. + SortOrder sort_order = 5; + + // sort_target is the key-value field to use for sorting. + SortTarget sort_target = 6; + + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + bool serializable = 7; + + // keys_only when set returns only the keys and not the values. + bool keys_only = 8; + + // count_only when set returns only the count of the keys in the range. 
+ bool count_only = 9; + + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + int64 min_mod_revision = 10; + + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + int64 max_mod_revision = 11; + + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + int64 min_create_revision = 12; + + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + int64 max_create_revision = 13; +} + +message RangeResponse { + ResponseHeader header = 1; + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + repeated mvccpb.KeyValue kvs = 2; + // more indicates if there are more keys to return in the requested range. + bool more = 3; + // count is set to the number of keys within the range when requested. + int64 count = 4; +} + +message PutRequest { + // key is the key, in bytes, to put into the key-value store. + bytes key = 1; + // value is the value, in bytes, to associate with the key in the key-value store. + bytes value = 2; + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + int64 lease = 3; + + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; +} + +message PutResponse { + ResponseHeader header = 1; + // if prev_kv is set in the request, the previous key-value pair will be returned. + mvccpb.KeyValue prev_kv = 2; +} + +message DeleteRangeRequest { + // key is the first key to delete in the range. + bytes key = 1; + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + bytes range_end = 2; + + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. + bool prev_kv = 3; +} + +message DeleteRangeResponse { + ResponseHeader header = 1; + // deleted is the number of keys deleted by the delete range request. + int64 deleted = 2; + // if prev_kv is set in the request, the previous key-value pairs will be returned. + repeated mvccpb.KeyValue prev_kvs = 3; +} + +message RequestOp { + // request is a union of request types accepted by a transaction. + oneof request { + RangeRequest request_range = 1; + PutRequest request_put = 2; + DeleteRangeRequest request_delete_range = 3; + TxnRequest request_txn = 4; + } +} + +message ResponseOp { + // response is a union of response types returned by a transaction. 
+ oneof response { + RangeResponse response_range = 1; + PutResponse response_put = 2; + DeleteRangeResponse response_delete_range = 3; + TxnResponse response_txn = 4; + } +} + +message Compare { + enum CompareResult { + EQUAL = 0; + GREATER = 1; + LESS = 2; + NOT_EQUAL = 3; + } + enum CompareTarget { + VERSION = 0; + CREATE = 1; + MOD = 2; + VALUE = 3; + LEASE = 4; + } + // result is logical comparison operation for this comparison. + CompareResult result = 1; + // target is the key-value field to inspect for the comparison. + CompareTarget target = 2; + // key is the subject key for the comparison operation. + bytes key = 3; + oneof target_union { + // version is the version of the given key + int64 version = 4; + // create_revision is the creation revision of the given key + int64 create_revision = 5; + // mod_revision is the last modified revision of the given key. + int64 mod_revision = 6; + // value is the value of the given key, in bytes. + bytes value = 7; + // lease is the lease id of the given key. + int64 lease = 8; + // leave room for more target_union field tags, jump to 64 + } + + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + bytes range_end = 64; + // TODO: fill out with most of the rest of RangeRequest fields when needed. +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +message TxnRequest { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + repeated Compare compare = 1; + // success is a list of requests which will be applied when compare evaluates to true. + repeated RequestOp success = 2; + // failure is a list of requests which will be applied when compare evaluates to false. + repeated RequestOp failure = 3; +} + +message TxnResponse { + ResponseHeader header = 1; + // succeeded is set to true if the compare evaluated to true or false otherwise. + bool succeeded = 2; + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. 
+ repeated ResponseOp responses = 3; +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +message CompactionRequest { + // revision is the key-value store revision for the compaction operation. + int64 revision = 1; + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + bool physical = 2; +} + +message CompactionResponse { + ResponseHeader header = 1; +} + +message HashRequest { +} + +message HashKVRequest { + // revision is the key-value store revision for the hash operation. + int64 revision = 1; +} + +message HashKVResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. + uint32 hash = 2; + // compact_revision is the compacted revision of key-value store when hash begins. + int64 compact_revision = 3; +} + +message HashResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's KV's backend. + uint32 hash = 2; +} + +message SnapshotRequest { +} + +message SnapshotResponse { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + ResponseHeader header = 1; + + // remaining_bytes is the number of blob bytes to be sent after this message + uint64 remaining_bytes = 2; + + // blob contains the next chunk of the snapshot in the snapshot stream. + bytes blob = 3; +} + +message WatchRequest { + // request_union is a request to either create a new watcher or cancel an existing watcher. + oneof request_union { + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; + WatchProgressRequest progress_request = 3; + } +} + +message WatchCreateRequest { + // key is the key to register for watching. + bytes key = 1; + + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + bytes range_end = 2; + + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + int64 start_revision = 3; + + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + bool progress_notify = 4; + + enum FilterType { + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; + } + + // filters filter the events at server side before it sends back to the watcher. + repeated FilterType filters = 5; + + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + bool prev_kv = 6; + + // If watch_id is provided and non-zero, it will be assigned to this watcher. 
+ // Since creating a watcher in etcd is not a synchronous operation, + // this can be used ensure that ordering is correct when creating multiple + // watchers on the same stream. Creating a watcher with an ID already in + // use on the stream will cause an error to be returned. + int64 watch_id = 7; + + // fragment enables splitting large revisions into multiple watch responses. + bool fragment = 8; +} + +message WatchCancelRequest { + // watch_id is the watcher id to cancel so that no more events are transmitted. + int64 watch_id = 1; +} + +// Requests the a watch stream progress status be sent in the watch response stream as soon as +// possible. +message WatchProgressRequest { +} + +message WatchResponse { + ResponseHeader header = 1; + // watch_id is the ID of the watcher that corresponds to the response. + int64 watch_id = 2; + + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + bool created = 3; + + // canceled is set to true if the response is for a cancel watch request. + // No further events will be sent to the canceled watcher. + bool canceled = 4; + + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + int64 compact_revision = 5; + + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + + // framgment is true if large watch response was split over multiple responses. + bool fragment = 7; + + repeated mvccpb.Event events = 11; +} + +message LeaseGrantRequest { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + int64 TTL = 1; + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + int64 ID = 2; +} + +message LeaseGrantResponse { + ResponseHeader header = 1; + // ID is the lease ID for the granted lease. + int64 ID = 2; + // TTL is the server chosen lease time-to-live in seconds. + int64 TTL = 3; + string error = 4; +} + +message LeaseRevokeRequest { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. + int64 ID = 1; +} + +message LeaseRevokeResponse { + ResponseHeader header = 1; +} + +message LeaseCheckpoint { + // ID is the lease ID to checkpoint. + int64 ID = 1; + + // Remaining_TTL is the remaining time until expiry of the lease. + int64 remaining_TTL = 2; +} + +message LeaseCheckpointRequest { + repeated LeaseCheckpoint checkpoints = 1; +} + +message LeaseCheckpointResponse { + ResponseHeader header = 1; +} + +message LeaseKeepAliveRequest { + // ID is the lease ID for the lease to keep alive. + int64 ID = 1; +} + +message LeaseKeepAliveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the new time-to-live for the lease. + int64 TTL = 3; +} + +message LeaseTimeToLiveRequest { + // ID is the lease ID for the lease. + int64 ID = 1; + // keys is true to query all the keys attached to this lease. 
+ bool keys = 2; +} + +message LeaseTimeToLiveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + int64 TTL = 3; + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + int64 grantedTTL = 4; + // Keys is the list of keys attached to this lease. + repeated bytes keys = 5; +} + +message LeaseLeasesRequest { +} + +message LeaseStatus { + int64 ID = 1; + // TODO: int64 TTL = 2; +} + +message LeaseLeasesResponse { + ResponseHeader header = 1; + repeated LeaseStatus leases = 2; +} + +message Member { + // ID is the member ID for this member. + uint64 ID = 1; + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + string name = 2; + // peerURLs is the list of URLs the member exposes to the cluster for communication. + repeated string peerURLs = 3; + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + repeated string clientURLs = 4; + // isLearner indicates if the member is raft learner. + bool isLearner = 5; +} + +message MemberAddRequest { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + repeated string peerURLs = 1; + // isLearner indicates if the added member is raft learner. + bool isLearner = 2; +} + +message MemberAddResponse { + ResponseHeader header = 1; + // member is the member information for the added member. + Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; +} + +message MemberRemoveRequest { + // ID is the member ID of the member to remove. + uint64 ID = 1; +} + +message MemberRemoveResponse { + ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; +} + +message MemberUpdateRequest { + // ID is the member ID of the member to update. + uint64 ID = 1; + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + repeated string peerURLs = 2; +} + +message MemberUpdateResponse{ + ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; +} + +message MemberListRequest { +} + +message MemberListResponse { + ResponseHeader header = 1; + // members is a list of all members associated with the cluster. + repeated Member members = 2; +} + +message MemberPromoteRequest { + // ID is the member ID of the member to promote. + uint64 ID = 1; +} + +message MemberPromoteResponse { + ResponseHeader header = 1; + // members is a list of all members after promoting the member. + repeated Member members = 2; +} + +message DefragmentRequest { +} + +message DefragmentResponse { + ResponseHeader header = 1; +} + +message MoveLeaderRequest { + // targetID is the node ID for the new leader. + uint64 targetID = 1; +} + +message MoveLeaderResponse { + ResponseHeader header = 1; +} + +enum AlarmType { + NONE = 0; // default, used to query if any alarm is active + NOSPACE = 1; // space quota is exhausted + CORRUPT = 2; // kv store corruption detected +} + +message AlarmRequest { + enum AlarmAction { + GET = 0; + ACTIVATE = 1; + DEACTIVATE = 2; + } + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. 
+ AlarmAction action = 1; + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + uint64 memberID = 2; + // alarm is the type of alarm to consider for this request. + AlarmType alarm = 3; +} + +message AlarmMember { + // memberID is the ID of the member associated with the raised alarm. + uint64 memberID = 1; + // alarm is the type of alarm which has been raised. + AlarmType alarm = 2; +} + +message AlarmResponse { + ResponseHeader header = 1; + // alarms is a list of alarms associated with the alarm request. + repeated AlarmMember alarms = 2; +} + +message StatusRequest { +} + +message StatusResponse { + ResponseHeader header = 1; + // version is the cluster protocol version used by the responding member. + string version = 2; + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. + int64 dbSize = 3; + // leader is the member ID which the responding member believes is the current leader. + uint64 leader = 4; + // raftIndex is the current raft committed index of the responding member. + uint64 raftIndex = 5; + // raftTerm is the current raft term of the responding member. + uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; + authpb.UserAddOptions options = 3; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. + string password = 2; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. + string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. 
+ authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + bytes key = 2; + bytes range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} + +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go new file mode 100644 index 000000000..23fe337a5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go @@ -0,0 +1,718 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kv.proto + +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ +package mvccpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } + +type KeyValue struct { + // key is the key in bytes. An empty key is not allowed. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // create_revision is the revision of last creation on this key. + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + // mod_revision is the revision of last modification on this key. 
+ ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + // value is the value held by the key, in bytes. + Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } + +func init() { + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) +} +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + 
dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyValue) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.CreateRevision != 0 { + n += 1 + sovKv(uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + n += 1 + sovKv(uint64(m.ModRevision)) + } + if m.Version != 0 { + n += 1 + sovKv(uint64(m.Version)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovKv(uint64(m.Lease)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovKv(uint64(m.Type)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + m.CreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + m.ModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Event_EventType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKv + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKv(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + 
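
The hand-written marshal/unmarshal code above is the gogo-protobuf fast path generated for mvccpb.KeyValue and mvccpb.Event. As an illustration only (this sketch is not part of the vendored file), the following standalone program round-trips a KeyValue through that code; the key, value, and revision literals are invented for the example:

package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/mvcc/mvccpb"
)

func main() {
	// A KeyValue as a range or watch response would carry it; the literals
	// below are made up for this sketch.
	kv := &mvccpb.KeyValue{
		Key:            []byte("/registry/routes/default/example"),
		CreateRevision: 10,
		ModRevision:    12,
		Version:        3,
		Value:          []byte(`{"kind":"Route"}`),
		Lease:          0, // no lease attached to this key
	}

	// Marshal walks the fast path shown above: encodeVarintKv writes each
	// populated field as a tag followed by a varint or length-prefixed bytes.
	raw, err := kv.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	// Unmarshal decodes the same wire format back into a struct.
	var out mvccpb.KeyValue
	if err := out.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key=%s modRevision=%d version=%d (%d wire bytes)\n",
		out.Key, out.ModRevision, out.Version, len(raw))

	// Events wrap a KeyValue; a PUT whose Kv.Version is 1 marks key creation.
	ev := &mvccpb.Event{Type: mvccpb.PUT, Kv: kv}
	fmt.Println("event type:", ev.Type) // the generated String() prints "PUT"
}
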
+var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto new file mode 100644 index 000000000..23c911b7d --- /dev/null +++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package mvccpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message KeyValue { + // key is the key in bytes. An empty key is not allowed. + bytes key = 1; + // create_revision is the revision of last creation on this key. + int64 create_revision = 2; + // mod_revision is the revision of last modification on this key. + int64 mod_revision = 3; + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + int64 version = 4; + // value is the value held by the key, in bytes. + bytes value = 5; + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + int64 lease = 6; +} + +message Event { + enum EventType { + PUT = 0; + DELETE = 1; + } + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + EventType type = 1; + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. 
+ // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + KeyValue kv = 2; + + // prev_kv holds the key-value pair before the event happens. + KeyValue prev_kv = 3; +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 000000000..4ce15dc6b --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,27 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0700 +) + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 000000000..a10a90583 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0777 +) + +// OpenDir opens a directory in windows with write access for syncing. 
+func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go new file mode 100644 index 000000000..69dde5a7d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. +package fileutil diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go new file mode 100644 index 000000000..f36136182 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coreos/pkg/capnslog" +) + +const ( + // PrivateFileMode grants owner to read/write a file. + PrivateFileMode = 0600 +) + +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil") + +// IsDirWriteable checks if dir is writable by writing and removing a file +// to dir. It returns nil if dir is writable. +func IsDirWriteable(dir string) error { + f := filepath.Join(dir, ".touch") + if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + return err + } + return os.Remove(f) +} + +// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory +// does not exists. TouchDirAll also ensures the given directory is writable. +func TouchDirAll(dir string) error { + // If path is already a directory, MkdirAll does nothing and returns nil, so, + // first check if dir exist with an expected permission mode. 
+ if Exist(dir) { + err := CheckDirPermission(dir, PrivateDirMode) + if err != nil { + plog.Warningf("check file permission: %v", err) + } + } else { + err := os.MkdirAll(dir, PrivateDirMode) + if err != nil { + // if mkdirAll("a/text") and "text" is not + // a directory, this will return syscall.ENOTDIR + return err + } + } + + return IsDirWriteable(dir) +} + +// CreateDirAll is similar to TouchDirAll but returns error +// if the deepest directory was not empty. +func CreateDirAll(dir string) error { + err := TouchDirAll(dir) + if err == nil { + var ns []string + ns, err = ReadDir(dir) + if err != nil { + return err + } + if len(ns) != 0 { + err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) + } + } + return err +} + +// Exist returns true if a file or directory exists. +func Exist(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily +// shorten the length of the file. +func ZeroToEnd(f *os.File) error { + // TODO: support FALLOC_FL_ZERO_RANGE + off, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, io.SeekEnd) + if lerr != nil { + return lerr + } + if err = f.Truncate(off); err != nil { + return err + } + // make sure blocks remain allocated + if err = Preallocate(f, lenf, true); err != nil { + return err + } + _, err = f.Seek(off, io.SeekStart) + return err +} + +// CheckDirPermission checks permission on an existing dir. +// Returns error if dir is empty or exist with a different permission than specified. +func CheckDirPermission(dir string, perm os.FileMode) error { + if !Exist(dir) { + return fmt.Errorf("directory %q empty, cannot check permission.", dir) + } + //check the existing permission on the directory + dirInfo, err := os.Stat(dir) + if err != nil { + return err + } + dirMode := dirInfo.Mode().Perm() + if dirMode != perm { + err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) + return err + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go new file mode 100644 index 000000000..338627f43 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
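
TouchDirAll, CreateDirAll, and CheckDirPermission above are the directory bootstrap helpers etcd relies on before writing data. Purely as an illustration (not part of the vendored package), a caller might combine them as in the sketch below; the /tmp path and the "wal" subdirectory name are hypothetical:

package main

import (
	"log"
	"path/filepath"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	dir := "/tmp/etcd-demo" // hypothetical directory, used only for this sketch

	// TouchDirAll creates the directory with PrivateDirMode (0700 on
	// non-Windows) when it is missing, logs a warning when an existing
	// directory has an unexpected mode, and finally proves writability by
	// creating and removing a ".touch" file via IsDirWriteable.
	if err := fileutil.TouchDirAll(dir); err != nil {
		log.Fatalf("cannot prepare %s: %v", dir, err)
	}

	// CheckDirPermission reports an error when the directory exists but is
	// more permissive than the mode the caller expects.
	if err := fileutil.CheckDirPermission(dir, fileutil.PrivateDirMode); err != nil {
		log.Printf("permission warning: %v", err)
	}

	// CreateDirAll behaves like TouchDirAll but additionally requires the
	// deepest directory to be empty.
	if err := fileutil.CreateDirAll(filepath.Join(dir, "wal")); err != nil {
		log.Fatalf("cannot create wal dir: %v", err)
	}
}
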
+ +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 000000000..542550bc8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 000000000..b0abc98ee --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "fmt" + "io" + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. 
+// +// constants from /usr/include/bits/fcntl-linux.h +const ( + F_OFD_GETLK = 37 + F_OFD_SETLK = 37 + F_OFD_SETLKW = 38 +) + +var ( + wrlck = syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + linuxTryLockFile = flockTryLockFile + linuxLockFile = flockLockFile +) + +func init() { + // use open file descriptor locks if the system supports it + getlk := syscall.Flock_t{Type: syscall.F_RDLCK} + if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { + linuxTryLockFile = ofdTryLockFile + linuxLockFile = ofdLockFile + } +} + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxTryLockFile(path, flag, perm) +} + +func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) + } + + flock := wrlck + if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxLockFile(path, flag, perm) +} + +func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) + } + + flock := wrlck + err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) + if err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go new file mode 100644 index 000000000..fee6a7c8f --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go @@ -0,0 +1,45 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
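
On Linux the locking code above prefers open file descriptor (OFD) locks and falls back to flock when F_OFD_GETLK is unavailable; callers only see TryLockFile, LockFile, and ErrLocked. The following is a minimal usage sketch, not part of the vendored sources, with a made-up lock path:

package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	lockPath := "/tmp/etcd-demo.lock" // hypothetical path, only for this sketch

	// TryLockFile fails fast with fileutil.ErrLocked when another process
	// already holds the lock; LockFile would instead block until it is free.
	l, err := fileutil.TryLockFile(lockPath, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err == fileutil.ErrLocked {
		log.Fatalf("%s is held by another process", lockPath)
	} else if err != nil {
		log.Fatalf("lock failed: %v", err)
	}
	// LockedFile embeds *os.File, so closing the file also releases the lock.
	defer l.Close()

	log.Printf("holding exclusive lock on %s", l.Name())
}
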
+ +package fileutil + +import ( + "os" + "syscall" + "time" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + f, err := os.Open(path, flag, perm) + if err != nil { + return nil, ErrLocked + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + for { + f, err := os.OpenFile(path, flag, perm) + if err == nil { + return &LockedFile{f}, nil + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go new file mode 100644 index 000000000..352ca5590 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go @@ -0,0 +1,62 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + lock.Pid = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { + f.Close() + if err == syscall.EAGAIN { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go new file mode 100644 index 000000000..ed01164de --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 000000000..b1817230a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err.Error() == 
errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 000000000..c747b7cf8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,54 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "io" + "os" +) + +// Preallocate tries to allocate the space for given +// file. This operation is only supported on linux by a +// few filesystems (btrfs, ext4, etc.). +// If the operation is unsupported, no error will be returned. +// Otherwise, the error encountered will be returned. +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, io.SeekEnd) + if err != nil { + return err + } + if _, err = f.Seek(curOff, io.SeekStart); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go new file mode 100644 index 000000000..5a6dccfa7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
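
Preallocate above reserves space for a file up front: fallocate on Linux, F_PREALLOCATE on darwin, and a seek-and-truncate fallback elsewhere or when the filesystem does not support allocation. A short sketch of how a caller might preallocate a scratch file follows; it is not part of the patch, and the temp-file name and 64 MiB segment size are invented:

package main

import (
	"io/ioutil"
	"log"
	"os"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	// Hypothetical scratch file standing in for something like a WAL segment.
	f, err := ioutil.TempFile("", "prealloc-demo-*.tmp")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	const segmentSize = 64 * 1024 * 1024 // 64 MiB, an invented size

	// extendFile=true grows the file to segmentSize and keeps the blocks
	// allocated; unsupported filesystems silently fall back to the
	// seek-and-truncate path shown in preallocExtendTrunc above.
	if err := fileutil.Preallocate(f, segmentSize, true); err != nil {
		log.Fatalf("preallocate: %v", err)
	}

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("allocated %s: %d bytes", f.Name(), fi.Size())
}
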
+ +// +build darwin + +package fileutil + +import ( + "os" + "syscall" + "unsafe" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + if err := preallocFixed(f, sizeInBytes); err != nil { + return err + } + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // allocate all requested space or no space at all + // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag + fstore := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Length: sizeInBytes} + p := unsafe.Pointer(fstore) + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) + if errno == 0 || errno == syscall.ENOTSUP { + return nil + } + + // wrong argument to fallocate syscall + if errno == syscall.EINVAL { + // filesystem "st_blocks" are allocated in the units of + // "Allocation Block Size" (run "diskutil info /" command) + var stat syscall.Stat_t + syscall.Fstat(int(f.Fd()), &stat) + + // syscall.Statfs_t.Bsize is "optimal transfer block size" + // and contains matching 4096 value when latest OS X kernel + // supports 4,096 KB filesystem block size + var statfs syscall.Statfs_t + syscall.Fstatfs(int(f.Fd()), &statfs) + blockSize := int64(statfs.Bsize) + + if stat.Blocks*blockSize >= sizeInBytes { + // enough blocks are already allocated + return nil + } + } + return errno +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go new file mode 100644 index 000000000..50bd84f02 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 000000000..162fbc5f7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go new file mode 100644 index 000000000..d116f340b --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "os" + "path/filepath" + "sort" + "strings" + "time" + + "go.uber.org/zap" +) + +func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) +} + +func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + return doneC, errC +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. +// if donec is non-nil, the function closes it to notify its exit. +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { + errC := make(chan error, 1) + go func() { + if donec != nil { + defer close(donec) + } + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := filepath.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + if lg != nil { + lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + } else { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + } + errC <- err + return + } + if lg != nil { + lg.Info("purged", zap.String("path", f)) + } else { + plog.Infof("purged file %s successfully", f) + } + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go new file mode 100644 index 000000000..2eeaa89bc --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go @@ -0,0 +1,70 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" +) + +// ReadDirOp represents an read-directory operation. +type ReadDirOp struct { + ext string +} + +// ReadDirOption configures archiver operations. +type ReadDirOption func(*ReadDirOp) + +// WithExt filters file names by their extensions. +// (e.g. 
WithExt(".wal") to list only WAL files) +func WithExt(ext string) ReadDirOption { + return func(op *ReadDirOp) { op.ext = ext } +} + +func (op *ReadDirOp) applyOpts(opts []ReadDirOption) { + for _, opt := range opts { + opt(op) + } +} + +// ReadDir returns the filenames in the given directory in sorted order. +func ReadDir(d string, opts ...ReadDirOption) ([]string, error) { + op := &ReadDirOp{} + op.applyOpts(opts) + + dir, err := os.Open(d) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + sort.Strings(names) + + if op.ext != "" { + tss := make([]string, 0) + for _, v := range names { + if filepath.Ext(v) == op.ext { + tss = append(tss, v) + } + } + names = tss + } + return names, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go new file mode 100644 index 000000000..54dd41f4f --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform. +func Fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go new file mode 100644 index 000000000..c2f39bf20 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync on HFS/OSX flushes the data on to the physical drive but the drive +// may not write it to the persistent media for quite sometime and it may be +// written in out-of-order sequence. Using F_FULLFSYNC ensures that the +// physical drive's buffer will also get flushed to the media. +func Fsync(f *os.File) error { + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0)) + if errno == 0 { + return nil + } + return errno +} + +// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence +// on physical drive media. 
+func Fdatasync(f *os.File) error { + return Fsync(f) +} diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go new file mode 100644 index 000000000..1bbced915 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go @@ -0,0 +1,34 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is similar to fsync(), but does not flush modified metadata +// unless that metadata is needed in order to allow a subsequent data retrieval +// to be correctly handled. +func Fdatasync(f *os.File) error { + return syscall.Fdatasync(int(f.Fd())) +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go new file mode 100644 index 000000000..81b0a9d03 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go @@ -0,0 +1,46 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "log" + + "google.golang.org/grpc/grpclog" +) + +// assert that "discardLogger" satisfy "Logger" interface +var _ Logger = &discardLogger{} + +// NewDiscardLogger returns a new Logger that discards everything except "fatal". +func NewDiscardLogger() Logger { return &discardLogger{} } + +type discardLogger struct{} + +func (l *discardLogger) Info(args ...interface{}) {} +func (l *discardLogger) Infoln(args ...interface{}) {} +func (l *discardLogger) Infof(format string, args ...interface{}) {} +func (l *discardLogger) Warning(args ...interface{}) {} +func (l *discardLogger) Warningln(args ...interface{}) {} +func (l *discardLogger) Warningf(format string, args ...interface{}) {} +func (l *discardLogger) Error(args ...interface{}) {} +func (l *discardLogger) Errorln(args ...interface{}) {} +func (l *discardLogger) Errorf(format string, args ...interface{}) {} +func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) } +func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) } +func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) 
} +func (l *discardLogger) V(lvl int) bool { + return false +} +func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go similarity index 75% rename from vendor/github.com/prometheus/procfs/cpuinfo_mips64.go rename to vendor/go.etcd.io/etcd/pkg/logutil/doc.go index 22d93f8ef..e919f2499 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go +++ b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go @@ -1,9 +1,10 @@ -// Copyright 2020 The Prometheus Authors +// Copyright 2018 The etcd Authors +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -11,8 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips +// Package logutil includes utilities to facilitate logging. +package logutil diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go b/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go new file mode 100644 index 000000000..d57e17394 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go @@ -0,0 +1,70 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "fmt" + + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var DefaultLogLevel = "info" + +// ConvertToZapLevel converts log level string to zapcore.Level. +func ConvertToZapLevel(lvl string) zapcore.Level { + switch lvl { + case "debug": + return zap.DebugLevel + case "info": + return zap.InfoLevel + case "warn": + return zap.WarnLevel + case "error": + return zap.ErrorLevel + case "dpanic": + return zap.DPanicLevel + case "panic": + return zap.PanicLevel + case "fatal": + return zap.FatalLevel + default: + panic(fmt.Sprintf("unknown level %q", lvl)) + } +} + +// ConvertToCapnslogLogLevel convert log level string to capnslog.LogLevel. 
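A small sketch of how ConvertToZapLevel is typically used to turn a string level into a zap configuration; the level value "warn" stands in for a user-supplied setting, and unknown level names panic.

    package main

    import (
        "go.etcd.io/etcd/pkg/logutil"
        "go.uber.org/zap"
    )

    func main() {
        // Translate a textual level into zapcore.Level and build a logger from it.
        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel("warn"))

        lg, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer lg.Sync()
        lg.Warn("logger configured from a string level")
    }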
+// TODO: deprecate this in 3.5 +func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel { + switch lvl { + case "debug": + return capnslog.DEBUG + case "info": + return capnslog.INFO + case "warn": + return capnslog.WARNING + case "error": + return capnslog.ERROR + case "dpanic": + return capnslog.CRITICAL + case "panic": + return capnslog.CRITICAL + case "fatal": + return capnslog.CRITICAL + default: + panic(fmt.Sprintf("unknown level %q", lvl)) + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go new file mode 100644 index 000000000..e7da80eff --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go @@ -0,0 +1,64 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import "google.golang.org/grpc/grpclog" + +// Logger defines logging interface. +// TODO: deprecate in v3.5. +type Logger interface { + grpclog.LoggerV2 + + // Lvl returns logger if logger's verbosity level >= "lvl". + // Otherwise, logger that discards everything. + Lvl(lvl int) grpclog.LoggerV2 +} + +// assert that "defaultLogger" satisfy "Logger" interface +var _ Logger = &defaultLogger{} + +// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface. +// +// For example: +// +// var defaultLogger Logger +// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4) +// defaultLogger = NewLogger(g) +// +func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} } + +type defaultLogger struct { + g grpclog.LoggerV2 +} + +func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) } +func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) } +func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) } +func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) 
} +func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) } +func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.g.V(lvl) { + return l + } + return &discardLogger{} +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go new file mode 100644 index 000000000..866b6f7a8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go @@ -0,0 +1,194 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "fmt" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + defaultMergePeriod = time.Second + defaultTimeOutputScale = 10 * time.Millisecond + + outputInterval = time.Second +) + +// line represents a log line that can be printed out +// through capnslog.PackageLogger. +type line struct { + level capnslog.LogLevel + str string +} + +func (l line) append(s string) line { + return line{ + level: l.level, + str: l.str + " " + s, + } +} + +// status represents the merge status of a line. +type status struct { + period time.Duration + + start time.Time // start time of latest merge period + count int // number of merged lines from starting +} + +func (s *status) isInMergePeriod(now time.Time) bool { + return s.period == 0 || s.start.Add(s.period).After(now) +} + +func (s *status) isEmpty() bool { return s.count == 0 } + +func (s *status) summary(now time.Time) string { + ts := s.start.Round(defaultTimeOutputScale) + took := now.Round(defaultTimeOutputScale).Sub(ts) + return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took) +} + +func (s *status) reset(now time.Time) { + s.start = now + s.count = 0 +} + +// MergeLogger supports merge logging, which merges repeated log lines +// and prints summary log lines instead. +// +// For merge logging, MergeLogger prints out the line when the line appears +// at the first time. MergeLogger holds the same log line printed within +// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod. +// It stops merging when the line doesn't appear within the +// defaultMergePeriod. 
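For illustration, a hedged sketch of MergeLogger in use; the repository/package names, the message, and the timings are invented for the example.

    package main

    import (
        "time"

        "github.com/coreos/pkg/capnslog"
        "go.etcd.io/etcd/pkg/logutil"
    )

    func main() {
        // Wrap a capnslog package logger so repeated lines are merged into
        // periodic "[merged N repeated lines in ...]" summaries.
        plog := capnslog.NewPackageLogger("go.etcd.io/etcd", "example")
        mlog := logutil.NewMergeLogger(plog)

        for i := 0; i < 100; i++ {
            // Identical lines within the merge period are counted, not reprinted.
            mlog.MergeInfof("peer %s is unreachable", "infra1")
            time.Sleep(10 * time.Millisecond)
        }
        // Give the output loop a chance to emit the summary line.
        time.Sleep(2 * time.Second)
    }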
+type MergeLogger struct { + *capnslog.PackageLogger + + mu sync.Mutex // protect statusm + statusm map[line]*status +} + +func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger { + l := &MergeLogger{ + PackageLogger: logger, + statusm: make(map[line]*status), + } + go l.outputLoop() + return l +} + +func (l *MergeLogger) MergeInfo(entries ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeInfof(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeNotice(entries ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeWarning(entries ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeError(entries ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) merge(ln line) { + l.mu.Lock() + + // increase count if the logger is merging the line + if status, ok := l.statusm[ln]; ok { + status.count++ + l.mu.Unlock() + return + } + + // initialize status of the line + l.statusm[ln] = &status{ + period: defaultMergePeriod, + start: time.Now(), + } + // release the lock before IO operation + l.mu.Unlock() + // print out the line at its first time + l.PackageLogger.Logf(ln.level, ln.str) +} + +func (l *MergeLogger) outputLoop() { + for now := range time.Tick(outputInterval) { + var outputs []line + + l.mu.Lock() + for ln, status := range l.statusm { + if status.isInMergePeriod(now) { + continue + } + if status.isEmpty() { + delete(l.statusm, ln) + continue + } + outputs = append(outputs, ln.append(status.summary(now))) + status.reset(now) + } + l.mu.Unlock() + + for _, o := range outputs { + l.PackageLogger.Logf(o.level, o.str) + } + } +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go new file mode 100644 index 000000000..729cbdb57 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go @@ -0,0 +1,60 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logutil + +import ( + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc/grpclog" +) + +// assert that "packageLogger" satisfy "Logger" interface +var _ Logger = &packageLogger{} + +// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface. +// +// For example: +// +// var defaultLogger Logger +// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot") +// +func NewPackageLogger(repo, pkg string) Logger { + return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)} +} + +type packageLogger struct { + p *capnslog.PackageLogger +} + +func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) } +func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) } +func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) } +func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) } +func (l *packageLogger) V(lvl int) bool { + return l.p.LevelAt(capnslog.LogLevel(lvl)) +} +func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.p.LevelAt(capnslog.LogLevel(lvl)) { + return l + } + return &discardLogger{} +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go new file mode 100644 index 000000000..8fc6e03b7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go @@ -0,0 +1,91 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "sort" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// DefaultZapLoggerConfig defines default zap logger configuration. 
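A brief usage sketch for NewPackageLogger and Lvl, routing gRPC's internal logging through a capnslog-backed logger; the repository/package names and the verbosity value are placeholders.

    package main

    import (
        "go.etcd.io/etcd/pkg/logutil"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        lg := logutil.NewPackageLogger("go.etcd.io/etcd", "grpc")

        // Lvl returns the logger itself when the package's capnslog level is at
        // least the given verbosity, and a discard logger otherwise.
        grpclog.SetLoggerV2(lg.Lvl(2))

        lg.Infof("gRPC logging wired up")
    }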
+var DefaultZapLoggerConfig = zap.Config{ + Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)), + + Development: false, + Sampling: &zap.SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + + Encoding: "json", + + // copied from "zap.NewProductionEncoderConfig" with some updates + EncoderConfig: zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + + // Use "/dev/null" to discard all + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, +} + +// MergeOutputPaths merges logging output paths, resolving conflicts. +func MergeOutputPaths(cfg zap.Config) zap.Config { + outputs := make(map[string]struct{}) + for _, v := range cfg.OutputPaths { + outputs[v] = struct{}{} + } + outputSlice := make([]string, 0) + if _, ok := outputs["/dev/null"]; ok { + // "/dev/null" to discard all + outputSlice = []string{"/dev/null"} + } else { + for k := range outputs { + outputSlice = append(outputSlice, k) + } + } + cfg.OutputPaths = outputSlice + sort.Strings(cfg.OutputPaths) + + errOutputs := make(map[string]struct{}) + for _, v := range cfg.ErrorOutputPaths { + errOutputs[v] = struct{}{} + } + errOutputSlice := make([]string, 0) + if _, ok := errOutputs["/dev/null"]; ok { + // "/dev/null" to discard all + errOutputSlice = []string{"/dev/null"} + } else { + for k := range errOutputs { + errOutputSlice = append(errOutputSlice, k) + } + } + cfg.ErrorOutputPaths = errOutputSlice + sort.Strings(cfg.ErrorOutputPaths) + + return cfg +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go new file mode 100644 index 000000000..3f48d813d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go @@ -0,0 +1,111 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/grpclog" +) + +// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2". +// It discards all INFO level logging in gRPC, if debug level +// is not enabled in "*zap.Logger". +func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) { + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core" +// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC, +// if debug level is not enabled in "*zap.Logger". 
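A short sketch of the intended flow for DefaultZapLoggerConfig, MergeOutputPaths and NewGRPCLoggerV2; the extra file sink path is illustrative.

    package main

    import (
        "go.etcd.io/etcd/pkg/logutil"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Start from the package default ("json" encoding, stderr output), add
        // sinks, and let MergeOutputPaths deduplicate and sort them.
        cfg := logutil.DefaultZapLoggerConfig
        cfg.OutputPaths = append(cfg.OutputPaths, "stderr", "/tmp/grpc.log")
        cfg = logutil.MergeOutputPaths(cfg)

        gl, err := logutil.NewGRPCLoggerV2(cfg)
        if err != nil {
            panic(err)
        }
        // Info-level gRPC logs are dropped unless the config enables debug level.
        grpclog.SetLoggerV2(gl)
    }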
+func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapGRPCLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapGRPCLogger) Info(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infoln(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Infof(format, args...) +} + +func (zl *zapGRPCLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningln(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapGRPCLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorln(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapGRPCLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalln(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) +} + +func (zl *zapGRPCLogger) V(l int) bool { + // infoLog == 0 + if l <= 0 { // debug level, then we ignore info level in gRPC + return !zl.lg.Core().Enabled(zapcore.DebugLevel) + } + return true +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go new file mode 100644 index 000000000..fcd390381 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go @@ -0,0 +1,92 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package logutil + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "go.etcd.io/etcd/pkg/systemd" + + "github.com/coreos/go-systemd/journal" + "go.uber.org/zap/zapcore" +) + +// NewJournalWriter wraps "io.Writer" to redirect log output +// to the local systemd journal. If journald send fails, it fails +// back to writing to the original writer. +// The decode overhead is only <30µs per write. 
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go +func NewJournalWriter(wr io.Writer) (io.Writer, error) { + return &journalWriter{Writer: wr}, systemd.DialJournal() +} + +type journalWriter struct { + io.Writer +} + +// WARN: assume that etcd uses default field names in zap encoder config +// make sure to keep this up-to-date! +type logLine struct { + Level string `json:"level"` + Caller string `json:"caller"` +} + +func (w *journalWriter) Write(p []byte) (int, error) { + line := &logLine{} + if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil { + return 0, err + } + + var pri journal.Priority + switch line.Level { + case zapcore.DebugLevel.String(): + pri = journal.PriDebug + case zapcore.InfoLevel.String(): + pri = journal.PriInfo + + case zapcore.WarnLevel.String(): + pri = journal.PriWarning + case zapcore.ErrorLevel.String(): + pri = journal.PriErr + + case zapcore.DPanicLevel.String(): + pri = journal.PriCrit + case zapcore.PanicLevel.String(): + pri = journal.PriCrit + case zapcore.FatalLevel.String(): + pri = journal.PriCrit + + default: + panic(fmt.Errorf("unknown log level: %q", line.Level)) + } + + err := journal.Send(string(p), pri, map[string]string{ + "PACKAGE": filepath.Dir(line.Caller), + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + }) + if err != nil { + // "journal" also falls back to stderr + // "fmt.Fprintln(os.Stderr, s)" + return w.Writer.Write(p) + } + return 0, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go new file mode 100644 index 000000000..f016b3054 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go @@ -0,0 +1,102 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "errors" + + "go.etcd.io/etcd/raft" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// NewRaftLogger builds "raft.Logger" from "*zap.Config". +func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { + if lcfg == nil { + return nil, errors.New("nil zap.Config") + } + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger". +func NewRaftLoggerZap(lg *zap.Logger) raft.Logger { + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} +} + +// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" +// and "zapcore.WriteSyncer". +func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapRaftLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapRaftLogger) Debug(args ...interface{}) { + zl.sugar.Debug(args...) 
+} + +func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) { + zl.sugar.Debugf(format, args...) +} + +func (zl *zapRaftLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapRaftLogger) Info(args ...interface{}) { + zl.sugar.Info(args...) +} + +func (zl *zapRaftLogger) Infof(format string, args ...interface{}) { + zl.sugar.Infof(format, args...) +} + +func (zl *zapRaftLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapRaftLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) +} + +func (zl *zapRaftLogger) Panic(args ...interface{}) { + zl.sugar.Panic(args...) +} + +func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) { + zl.sugar.Panicf(format, args...) +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go similarity index 75% rename from vendor/github.com/prometheus/procfs/cpuinfo_mips.go rename to vendor/go.etcd.io/etcd/pkg/systemd/doc.go index 22d93f8ef..30e77ce04 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go +++ b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go @@ -1,9 +1,10 @@ -// Copyright 2020 The Prometheus Authors +// Copyright 2018 The etcd Authors +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -11,8 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips +// Package systemd provides utility functions for systemd. +package systemd diff --git a/vendor/go.etcd.io/etcd/pkg/systemd/journal.go b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go new file mode 100644 index 000000000..b861c6942 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go @@ -0,0 +1,29 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package systemd + +import "net" + +// DialJournal returns no error if the process can dial journal socket. +// Returns an error if dial failed, whichi indicates journald is not available +// (e.g. run embedded etcd as docker daemon). +// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go. 
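A sketch of how DialJournal and NewJournalWriter can be combined to prefer journald when its socket is reachable (non-Windows only, given the build tag on zap_journal.go); falling back to stderr is an assumption made for the example, and the returned writer expects zap's JSON-encoded log lines.

    package main

    import (
        "io"
        "os"

        "go.etcd.io/etcd/pkg/logutil"
        "go.etcd.io/etcd/pkg/systemd"
    )

    // journalOrStderr returns a journald-backed writer when the journal socket
    // can be dialed, and plain stderr otherwise.
    func journalOrStderr() io.Writer {
        if err := systemd.DialJournal(); err != nil {
            // journald is not available, e.g. inside a minimal container.
            return os.Stderr
        }
        w, err := logutil.NewJournalWriter(os.Stderr)
        if err != nil {
            return os.Stderr
        }
        return w
    }

    func main() {
        _ = journalOrStderr()
    }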
+func DialJournal() error { + conn, err := net.Dial("unixgram", "/run/systemd/journal/socket") + if conn != nil { + defer conn.Close() + } + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go new file mode 100644 index 000000000..b5916bb54 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go @@ -0,0 +1,51 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import "crypto/tls" + +// cipher suites implemented by Go +// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go +var cipherSuites = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +// GetCipherSuite returns the corresponding cipher suite, +// and boolean value if it is supported. 
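A minimal sketch of GetCipherSuite used to validate user-supplied cipher-suite names before handing them to crypto/tls; the helper name cipherSuitesFromNames is invented for the example.

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/pkg/tlsutil"
    )

    // cipherSuitesFromNames maps cipher-suite names to the numeric IDs expected
    // by crypto/tls, rejecting names Go does not implement.
    func cipherSuitesFromNames(names []string) ([]uint16, error) {
        ids := make([]uint16, 0, len(names))
        for _, name := range names {
            id, ok := tlsutil.GetCipherSuite(name)
            if !ok {
                return nil, fmt.Errorf("unsupported cipher suite %q", name)
            }
            ids = append(ids, id)
        }
        return ids, nil
    }

    func main() {
        ids, err := cipherSuitesFromNames([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
        if err != nil {
            panic(err)
        }
        fmt.Println(ids)
    }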
+func GetCipherSuite(s string) (uint16, bool) { + v, ok := cipherSuites[s] + return v, ok +} diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go new file mode 100644 index 000000000..3b6aa670b --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tlsutil provides utility functions for handling TLS. +package tlsutil diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go new file mode 100644 index 000000000..3a5aef089 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go @@ -0,0 +1,73 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" +) + +// NewCertPool creates x509 certPool with provided CA files. +func NewCertPool(CAFiles []string) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, CAFile := range CAFiles { + pemByte, err := ioutil.ReadFile(CAFile) + if err != nil { + return nil, err + } + + for { + var block *pem.Block + block, pemByte = pem.Decode(pemByte) + if block == nil { + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + + certPool.AddCert(cert) + } + } + + return certPool, nil +} + +// NewCert generates TLS cert by using the given cert,key and parse function. +func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { + cert, err := ioutil.ReadFile(certfile) + if err != nil { + return nil, err + } + + key, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, err + } + + if parseFunc == nil { + parseFunc = tls.X509KeyPair + } + + tlsCert, err := parseFunc(cert, key) + if err != nil { + return nil, err + } + return &tlsCert, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/doc.go b/vendor/go.etcd.io/etcd/pkg/transport/doc.go new file mode 100644 index 000000000..37658ce59 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport implements various HTTP transport utilities based on Go +// net package. +package transport diff --git a/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go new file mode 100644 index 000000000..4ff8e7f00 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go @@ -0,0 +1,94 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "time" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. +// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + if scheme == "https" { + if tlscfg == nil { + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + } + return newTLSKeepaliveListener(l, tlscfg), nil + } + + return &keepaliveListener{ + Listener: l, + }, nil +} + +type keepaliveListener struct{ net.Listener } + +func (kln *keepaliveListener) Accept() (net.Conn, error) { + c, err := kln.Listener.Accept() + if err != nil { + return nil, err + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil +} + +// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. +type tlsKeepaliveListener struct { + net.Listener + config *tls.Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. 
+func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) + return c, nil +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. +func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { + l := &tlsKeepaliveListener{} + l.Listener = inner + l.config = config + return l +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go new file mode 100644 index 000000000..930c54206 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go @@ -0,0 +1,80 @@ +// Copyright 2013 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides network utility functions, complementing the more +// common ones in the net package. +package transport + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + ErrNotTCP = errors.New("only tcp connections have keepalive") +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
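A sketch combining LimitListener and NewKeepAliveListener; the address, connection cap and handler are placeholders. Wrapping in this order works because the limited connection type implements the keep-alive setters asserted in Accept.

    package main

    import (
        "log"
        "net"
        "net/http"

        "go.etcd.io/etcd/pkg/transport"
    )

    func main() {
        ln, err := net.Listen("tcp", ":8080")
        if err != nil {
            log.Fatal(err)
        }

        // Cap concurrent connections, then enable TCP keepalives on each
        // accepted connection ("http" scheme, so no TLS config is needed).
        ln = transport.LimitListener(ln, 1024)
        ln, err = transport.NewKeepAliveListener(ln, "http", nil)
        if err != nil {
            log.Fatal(err)
        }

        log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok\n"))
        })))
    }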
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener.go b/vendor/go.etcd.io/etcd/pkg/transport/listener.go new file mode 100644 index 000000000..7260e4d07 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener.go @@ -0,0 +1,449 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/tlsutil" + + "go.uber.org/zap" +) + +// NewListener creates a new listner. +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + if tlsinfo != nil && tlsinfo.SkipClientSANVerify { + return NewTLSListener(l, tlsinfo) + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +type TLSInfo struct { + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + SkipClientSANVerify bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. 
+ HandshakeFailure func(*tls.Conn, error) + + // CipherSuites is a list of supported cipher suites. + // If empty, Go auto-populates it by default. + // Note that cipher suites are prioritized in the given order. + CipherSuites []uint16 + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. + parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string + + // AllowedHostname is an IP address or hostname that must match the TLS + // certificate provided by a client. + AllowedHostname string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. + EmptyCN bool +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { + info.Logger = lg + err = fileutil.TouchDirAll(dirpath) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot create cert directory", + zap.Error(err), + ) + } + return + } + + certPath := filepath.Join(dirpath, "cert.pem") + keyPath := filepath.Join(dirpath, "key.pem") + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.selfCert = true + return + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + } + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), + BasicConstraintsValid: true, + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + } + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + } + return + } + + certOut, err := os.Create(certPath) + if err != nil { + info.Logger.Warn( + "cannot cert file", + zap.String("path", certPath), + zap.Error(err), + ) + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + if info.Logger != nil { + info.Logger.Info("created cert file", zap.String("path", certPath)) + } + + b, err := 
x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + } + return + } + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + if info.Logger != nil { + info.Logger.Info("created key file", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts) +} + +// baseConfig is called on initial TLS handshake start. +// +// Previously, +// 1. Server has non-empty (*tls.Config).Certificates on client hello +// 2. Server calls (*tls.Config).GetCertificate iff: +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// +// When (*tls.Config).Certificates is always populated on initial handshake, +// client is expected to provide a valid matching SNI to pass the TLS +// verification, thus trigger server (*tls.Config).GetCertificate to reload +// TLS assets. However, a cert whose SAN field does not include domain names +// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus +// it was never able to trigger TLS reload on initial handshake; first +// ceritifcate object was being used, never being updated. +// +// Now, (*tls.Config).Certificates is created empty on initial TLS client +// handshake, in order to trigger (*tls.Config).GetCertificate and populate +// rest of the certificates on every new TLS connection, even when client +// SNI is empty (e.g. cert only includes IPs). +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + if info.Logger == nil { + info.Logger = zap.NewNop() + } + + _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if err != nil { + return nil, err + } + + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + + if len(info.CipherSuites) > 0 { + cfg.CipherSuites = info.CipherSuites + } + + // Client certificates may be verified by either an exact match on the CN, + // or a more general check of the CN and SANs. + var verifyCertificate func(*x509.Certificate) bool + if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if verifyCertificate(chains[0]) { + return nil + } + } + } + return errors.New("client certificate authentication failed") + } + } + + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + return cfg, nil +} + +// cafiles returns a list of CA file paths. +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.TrustedCAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify + + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. 
+// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go new file mode 100644 index 000000000..6f1600945 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,272 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials or CRL revoked +// certificates. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc +} + +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + check: check, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
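A hedged sketch of TLSInfo together with NewListener, producing an HTTPS listener that goes through the SAN/CRL-checking tlsListener in this file; the certificate paths and address are placeholders.

    package main

    import (
        "log"
        "net/http"

        "go.etcd.io/etcd/pkg/transport"
    )

    func main() {
        tlsinfo := &transport.TLSInfo{
            CertFile:       "/etc/pki/tls/server.crt",
            KeyFile:        "/etc/pki/tls/server.key",
            TrustedCAFile:  "/etc/pki/tls/ca.crt",
            ClientCertAuth: true,
        }

        // The "https" scheme makes NewListener wrap the TCP listener with the
        // TLS listener defined here, including client SAN verification.
        ln, err := transport.NewListener("0.0.0.0:8443", "https", tlsinfo)
        if err != nil {
            log.Fatal(err)
        }
        log.Fatal(http.Serve(ln, http.NotFoundHandler()))
    }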
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return + } + + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go new file mode 100644 index 000000000..7e8c02030 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go @@ -0,0 +1,44 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +type timeoutConn struct { + net.Conn + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (c timeoutConn) Write(b []byte) (n int, err error) { + if c.wtimeoutd > 0 { + if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Write(b) +} + +func (c timeoutConn) Read(b []byte) (n int, err error) { + if c.rdtimeoutd > 0 { + if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go new file mode 100644 index 000000000..6ae39ecfc --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "time" +) + +type rwTimeoutDialer struct { + wtimeoutd time.Duration + rdtimeoutd time.Duration + net.Dialer +} + +func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { + conn, err := d.Dialer.Dial(network, address) + tconn := &timeoutConn{ + rdtimeoutd: d.rdtimeoutd, + wtimeoutd: d.wtimeoutd, + Conn: conn, + } + return tconn, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go new file mode 100644 index 000000000..273e99fe0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go @@ -0,0 +1,57 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +// NewTimeoutListener returns a listener that listens on the given address. +// If read/write on the accepted connection blocks longer than its time limit, +// it will return timeout error. +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { + ln, err := newListener(addr, scheme) + if err != nil { + return nil, err + } + ln = &rwTimeoutListener{ + Listener: ln, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + } + if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { + return nil, err + } + return ln, nil +} + +type rwTimeoutListener struct { + net.Listener + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { + c, err := rwln.Listener.Accept() + if err != nil { + return nil, err + } + return timeoutConn{ + Conn: c, + wtimeoutd: rwln.wtimeoutd, + rdtimeoutd: rwln.rdtimeoutd, + }, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go new file mode 100644 index 000000000..ea16b4c0f --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "net/http" + "time" +) + +// NewTimeoutTransport returns a transport created using the given TLS info. +// If read/write on the created connection blocks longer than its time limit, +// it will return timeout error. +// If read/write timeout is set, transport will not be able to reuse connection. 
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // the timed out connection will timeout soon after it is idle. + // it should not be put back to http transport as an idle connection for future usage. + tr.MaxIdleConnsPerHost = -1 + } else { + // allow more idle connections between peers to avoid unnecessary port allocation. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/tls.go b/vendor/go.etcd.io/etcd/pkg/transport/tls.go new file mode 100644 index 000000000..62fe0d385 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/tls.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "strings" + "time" +) + +// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those +// endpoints that could be validated as secure. +func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { + t, err := NewTransport(tlsInfo, 5*time.Second) + if err != nil { + return nil, err + } + var errs []string + var endpoints []string + for _, ep := range eps { + if !strings.HasPrefix(ep, "https://") { + errs = append(errs, fmt.Sprintf("%q is insecure", ep)) + continue + } + conn, cerr := t.Dial("tcp", ep[len("https://"):]) + if cerr != nil { + errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) + continue + } + conn.Close() + endpoints = append(endpoints, ep) + } + if len(errs) != 0 { + err = fmt.Errorf("%s", strings.Join(errs, ",")) + } + return endpoints, err +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/transport.go b/vendor/go.etcd.io/etcd/pkg/transport/transport.go new file mode 100644 index 000000000..4a7fe69d2 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/transport.go @@ -0,0 +1,71 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: dialtimeoutd, + // value taken from http.DefaultTransport + KeepAlive: 30 * time.Second, + }).Dial, + // value taken from http.DefaultTransport + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }) + dial := func(net, addr string) (net.Conn, error) { + return dialer.Dial("unix", addr) + } + + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go new file mode 100644 index 000000000..123e2036f --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "os" +) + +type unixListener struct{ net.Listener } + +func NewUnixListener(addr string) (net.Listener, error) { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return nil, err + } + l, err := net.Listen("unix", addr) + if err != nil { + return nil, err + } + return &unixListener{l}, nil +} + +func (ul *unixListener) Close() error { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + return err + } + return ul.Listener.Close() +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/pkg/types/doc.go new file mode 100644 index 000000000..de8ef0bd7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/go.etcd.io/etcd/pkg/types/id.go b/vendor/go.etcd.io/etcd/pkg/types/id.go new file mode 100644 index 000000000..ae00388dd --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/id.go @@ -0,0 +1,39 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "strconv" + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/pkg/types/set.go b/vendor/go.etcd.io/etcd/pkg/types/set.go new file mode 100644 index 000000000..e7a3cdc9a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/set.go @@ -0,0 +1,195 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return exists +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return values +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/pkg/types/slice.go new file mode 100644 index 000000000..0dd9ca798 --- /dev/null +++ 
b/vendor/go.etcd.io/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/pkg/types/urls.go b/vendor/go.etcd.io/etcd/pkg/types/urls.go new file mode 100644 index 000000000..9e5d03ff6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go new file mode 100644 index 000000000..47690cc38 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/go.etcd.io/etcd/raft/OWNERS b/vendor/go.etcd.io/etcd/raft/OWNERS new file mode 100644 index 000000000..ab781066e --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/OWNERS @@ -0,0 +1,19 @@ +approvers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +reviewers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +- tschottdorf diff --git a/vendor/go.etcd.io/etcd/raft/README.md b/vendor/go.etcd.io/etcd/raft/README.md new file mode 100644 index 000000000..83cf04035 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/README.md @@ -0,0 +1,197 @@ +# Raft library + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. 
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. + +This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more. + +Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance. + +To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state. + +In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. + +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/master/contrib/raftexample + +# Features + +This raft implementation is a full-featured implementation of the Raft protocol.
Features include: + +- Leader election +- Log replication +- Log compaction +- Membership changes +- Leadership transfer extension +- Efficient linearizable read-only queries served by both the leader and followers + - leader checks with quorum and bypasses the Raft log before processing read-only queries + - followers ask the leader to get a safe read index before processing read-only queries +- More efficient lease-based linearizable read-only queries served by both the leader and followers + - leader bypasses the Raft log and processes read-only queries locally + - followers ask the leader to get a safe read index before processing read-only queries + - this approach relies on the clocks of all the machines in the raft group + +This raft implementation also includes a few optional enhancements: + +- Optimistic pipelining to reduce log replication latency +- Flow control for log replication +- Batching Raft messages to reduce synchronized network I/O calls +- Batching log entries to reduce synchronized disk I/O +- Writing to leader's disk in parallel +- Internal proposal redirection from followers to leader +- Automatic stepping down when the leader loses quorum +- Protection against unbounded log growth when quorum is lost + +## Notable Users + +- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database +- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database +- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store +- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft +- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks + +## Usage + +The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a three-node cluster: +```go + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + // Set peer list to the other nodes in the cluster. + // Note that they need to be started separately as well. + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) +``` + +Start a single-node cluster, like so: +```go + // Create storage and config as shown above. + // Set peer list to itself, so this node can become the leader of this single-node cluster. + peers := []raft.Peer{{ID: 0x01}} + n := raft.StartNode(c, peers) +``` + +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: +```go + // Create storage and config as shown above. + n := raft.StartNode(c, nil) +``` + +To restart a node from a previous state: +```go + storage := raft.NewMemoryStorage() + + // Recover the in-memory storage from persistent snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // Restart raft without peer information.
+ // Peer information is already included in the storage. + n := raft.RestartNode(c) +``` + +After creating a Node, the user has a few responsibilities: + +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. + +1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. + +Third, after receiving a message from another node, pass it to Node.Step: + +```go + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } +``` + +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick". 
+ +The total state machine handling loop will look something like this: + +```go + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } +``` + +To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call: + +```go + n.Propose(ctx, data) +``` + +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. + +To add or remove node in a cluster, build ConfChange struct 'cc' and call: + +```go + n.ProposeConfChange(ctx, cc) +``` + +After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through: + +```go + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) +``` + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. +This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +## Implementation notes + +This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. + +To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. + +This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. diff --git a/vendor/go.etcd.io/etcd/raft/bootstrap.go b/vendor/go.etcd.io/etcd/raft/bootstrap.go new file mode 100644 index 000000000..bd82b2041 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/bootstrap.go @@ -0,0 +1,80 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Bootstrap initializes the RawNode for first use by appending configuration +// changes for the supplied peers. This method returns an error if the Storage +// is nonempty. +// +// It is recommended that instead of calling this method, applications bootstrap +// their state manually by setting up a Storage that has a first index > 1 and +// which stores the desired ConfState as its InitialState. +func (rn *RawNode) Bootstrap(peers []Peer) error { + if len(peers) == 0 { + return errors.New("must provide at least one peer to Bootstrap") + } + lastIndex, err := rn.raft.raftLog.storage.LastIndex() + if err != nil { + return err + } + + if lastIndex != 0 { + return errors.New("can't bootstrap a nonempty Storage") + } + + // We've faked out initial entries above, but nothing has been + // persisted. Start with an empty HardState (thus the first Ready will + // emit a HardState update for the app to persist). + rn.prevHardSt = emptyState + + // TODO(tbg): remove StartNode and give the application the right tools to + // bootstrap the initial membership in a cleaner way. + rn.raft.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + return err + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + rn.raft.raftLog.append(ents...) + + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). + // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + // + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + rn.raft.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2()) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/confchange.go b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go new file mode 100644 index 000000000..a0dc486df --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go @@ -0,0 +1,425 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + "errors" + "fmt" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Changer facilitates configuration changes. It exposes methods to handle +// simple and joint consensus while performing the proper validation that allows +// refusing invalid configuration changes before they affect the active +// configuration. +type Changer struct { + Tracker tracker.ProgressTracker + LastIndex uint64 +} + +// EnterJoint verifies that the outgoing (=right) majority config of the joint +// config is empty and initializes it with a copy of the incoming (=left) +// majority config. That is, it transitions from +// +// (1 2 3)&&() +// to +// (1 2 3)&&(1 2 3). +// +// The supplied changes are then applied to the incoming majority config, +// resulting in a joint configuration that in terms of the Raft thesis[1] +// (Section 4.3) corresponds to `C_{new,old}`. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("config is already joint") + return c.err(err) + } + if len(incoming(cfg.Voters)) == 0 { + // We allow adding nodes to an empty config for convenience (testing and + // bootstrap), but you can't enter a joint state. + err := errors.New("can't make a zero-voter config joint") + return c.err(err) + } + // Clear the outgoing config. + *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{} + // Copy incoming to outgoing. + for id := range incoming(cfg.Voters) { + outgoing(cfg.Voters)[id] = struct{}{} + } + + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + cfg.AutoLeave = autoLeave + return checkAndReturn(cfg, prs) +} + +// LeaveJoint transitions out of a joint configuration. It is an error to call +// this method if the configuration is not joint, i.e. if the outgoing majority +// config Voters[1] is empty. +// +// The outgoing majority config of the joint configuration will be removed, +// that is, the incoming config is promoted as the sole decision maker. In the +// notation of the Raft thesis[1] (Section 4.3), this method transitions from +// `C_{new,old}` into `C_new`. +// +// At the same time, any staged learners (LearnersNext) the addition of which +// was held back by an overlapping voter in the former outgoing config will be +// inserted into Learners. 
+// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if !joint(cfg) { + err := errors.New("can't leave a non-joint config") + return c.err(err) + } + if len(outgoing(cfg.Voters)) == 0 { + err := fmt.Errorf("configuration is not joint: %v", cfg) + return c.err(err) + } + for id := range cfg.LearnersNext { + nilAwareAdd(&cfg.Learners, id) + prs[id].IsLearner = true + } + cfg.LearnersNext = nil + + for id := range outgoing(cfg.Voters) { + _, isVoter := incoming(cfg.Voters)[id] + _, isLearner := cfg.Learners[id] + + if !isVoter && !isLearner { + delete(prs, id) + } + } + *outgoingPtr(&cfg.Voters) = nil + cfg.AutoLeave = false + + return checkAndReturn(cfg, prs) +} + +// Simple carries out a series of configuration changes that (in aggregate) +// mutates the incoming majority config Voters[0] by at most one. This method +// will return an error if that is not the case, if the resulting quorum is +// zero, or if the configuration is in a joint state (i.e. if there is an +// outgoing configuration). +func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("can't apply simple config change in joint config") + return c.err(err) + } + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { + return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config") + } + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, nil + } + + return checkAndReturn(cfg, prs) +} + +// apply a change to the configuration. By convention, changes to voters are +// always made to the incoming majority config Voters[0]. Voters[1] is either +// empty or preserves the outgoing majority configuration while in a joint state. +func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { + for _, cc := range ccs { + if cc.NodeID == 0 { + // etcd replaces the NodeID with zero if it decides (downstream of + // raft) to not apply a change, so we have to have explicit code + // here to ignore these. + continue + } + switch cc.Type { + case pb.ConfChangeAddNode: + c.makeVoter(cfg, prs, cc.NodeID) + case pb.ConfChangeAddLearnerNode: + c.makeLearner(cfg, prs, cc.NodeID) + case pb.ConfChangeRemoveNode: + c.remove(cfg, prs, cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + return fmt.Errorf("unexpected conf type %d", cc.Type) + } + } + if len(incoming(cfg.Voters)) == 0 { + return errors.New("removed all voters") + } + return nil +} + +// makeVoter adds or promotes the given ID to be a voter in the incoming +// majority config. +func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, false /* isLearner */) + return + } + + pr.IsLearner = false + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + incoming(cfg.Voters)[id] = struct{}{} + return +} + +// makeLearner makes the given ID a learner or stages it to be a learner once +// an active joint configuration is exited. 
+// +// The former happens when the peer is not a part of the outgoing config, in +// which case we either add a new learner or demote a voter in the incoming +// config. +// +// The latter case occurs when the configuration is joint and the peer is a +// voter in the outgoing config. In that case, we do not want to add the peer +// as a learner because then we'd have to track a peer as a voter and learner +// simultaneously. Instead, we add the learner to LearnersNext, so that it will +// be added to Learners the moment the outgoing config is removed by +// LeaveJoint(). +func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, true /* isLearner */) + return + } + if pr.IsLearner { + return + } + // Remove any existing voter in the incoming config... + c.remove(cfg, prs, id) + // ... but save the Progress. + prs[id] = pr + // Use LearnersNext if we can't add the learner to Learners directly, i.e. + // if the peer is still tracked as a voter in the outgoing config. It will + // be turned into a learner in LeaveJoint(). + // + // Otherwise, add a regular learner right away. + if _, onRight := outgoing(cfg.Voters)[id]; onRight { + nilAwareAdd(&cfg.LearnersNext, id) + } else { + pr.IsLearner = true + nilAwareAdd(&cfg.Learners, id) + } +} + +// remove this peer as a voter or learner from the incoming config. +func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + if _, ok := prs[id]; !ok { + return + } + + delete(incoming(cfg.Voters), id) + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + + // If the peer is still a voter in the outgoing config, keep the Progress. + if _, onRight := outgoing(cfg.Voters)[id]; !onRight { + delete(prs, id) + } +} + +// initProgress initializes a new progress for the given node or learner. +func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { + if !isLearner { + incoming(cfg.Voters)[id] = struct{}{} + } else { + nilAwareAdd(&cfg.Learners, id) + } + prs[id] = &tracker.Progress{ + // Initializing the Progress with the last index means that the follower + // can be probed (with the last index). + // + // TODO(tbg): seems awfully optimistic. Using the first index would be + // better. The general expectation here is that the follower has no log + // at all (and will thus likely need a snapshot), though the app may + // have applied a snapshot out of band before adding the replica (thus + // making the first index the better choice). + Next: c.LastIndex, + Match: 0, + Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + IsLearner: isLearner, + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has had a chance to communicate with us. + RecentActive: true, + } +} + +// checkInvariants makes sure that the config and progress are compatible with +// each other. This is used to check both what the Changer is initialized with, +// as well as what it returns. +func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { + // NB: intentionally allow the empty config. In production we'll never see a + // non-empty config (we prevent it from being created) but we will need to + // be able to *create* an initial config, for example during bootstrap (or + // during tests). 
Instead of having to hand-code this, we allow + // transitioning from an empty config into any other legal and non-empty + // config. + for _, ids := range []map[uint64]struct{}{ + cfg.Voters.IDs(), + cfg.Learners, + cfg.LearnersNext, + } { + for id := range ids { + if _, ok := prs[id]; !ok { + return fmt.Errorf("no progress for %d", id) + } + } + } + + // Any staged learner was staged because it could not be directly added due + // to a conflicting voter in the outgoing config. + for id := range cfg.LearnersNext { + if _, ok := outgoing(cfg.Voters)[id]; !ok { + return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id) + } + if prs[id].IsLearner { + return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id) + } + } + // Conversely Learners and Voters doesn't intersect at all. + for id := range cfg.Learners { + if _, ok := outgoing(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[1]", id) + } + if _, ok := incoming(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[0]", id) + } + if !prs[id].IsLearner { + return fmt.Errorf("%d is in Learners, but is not marked as learner", id) + } + } + + if !joint(cfg) { + // We enforce that empty maps are nil instead of zero. + if outgoing(cfg.Voters) != nil { + return fmt.Errorf("Voters[1] must be nil when not joint") + } + if cfg.LearnersNext != nil { + return fmt.Errorf("LearnersNext must be nil when not joint") + } + if cfg.AutoLeave { + return fmt.Errorf("AutoLeave must be false when not joint") + } + } + + return nil +} + +// checkAndCopy copies the tracker's config and progress map (deeply enough for +// the purposes of the Changer) and returns those copies. It returns an error +// if checkInvariants does. +func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { + cfg := c.Tracker.Config.Clone() + prs := tracker.ProgressMap{} + + for id, pr := range c.Tracker.Progress { + // A shallow copy is enough because we only mutate the Learner field. + ppr := *pr + prs[id] = &ppr + } + return checkAndReturn(cfg, prs) +} + +// checkAndReturn calls checkInvariants on the input and returns either the +// resulting error or the input. +func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, err + } + return cfg, prs, nil +} + +// err returns zero values and an error. +func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) { + return tracker.Config{}, nil, err +} + +// nilAwareAdd populates a map entry, creating the map if necessary. +func nilAwareAdd(m *map[uint64]struct{}, id uint64) { + if *m == nil { + *m = map[uint64]struct{}{} + } + (*m)[id] = struct{}{} +} + +// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after. +func nilAwareDelete(m *map[uint64]struct{}, id uint64) { + if *m == nil { + return + } + delete(*m, id) + if len(*m) == 0 { + *m = nil + } +} + +// symdiff returns the count of the symmetric difference between the sets of +// uint64s, i.e. len( (l - r) \union (r - l)). 
+func symdiff(l, r map[uint64]struct{}) int { + var n int + pairs := [][2]quorum.MajorityConfig{ + {l, r}, // count elems in l but not in r + {r, l}, // count elems in r but not in l + } + for _, p := range pairs { + for id := range p[0] { + if _, ok := p[1][id]; !ok { + n++ + } + } + } + return n +} + +func joint(cfg tracker.Config) bool { + return len(outgoing(cfg.Voters)) > 0 +} + +func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] } +func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] } +func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] } + +// Describe prints the type and NodeID of the configuration changes as a +// space-delimited string. +func Describe(ccs ...pb.ConfChangeSingle) string { + var buf strings.Builder + for _, cc := range ccs { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/restore.go b/vendor/go.etcd.io/etcd/raft/confchange/restore.go new file mode 100644 index 000000000..724068da0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/restore.go @@ -0,0 +1,155 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// toConfChangeSingle translates a conf state into 1) a slice of operations creating +// first the config that will become the outgoing one, and then the incoming one, and +// b) another slice that, when applied to the config resulted from 1), represents the +// ConfState. +func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) { + // Example to follow along this code: + // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4) + // + // This means that before entering the joint config, the configuration + // had voters (1 2 4) and perhaps some learners that are already gone. + // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6) + // are no longer voters; however 4 is poised to become a learner upon leaving + // the joint state. + // We can't tell whether 5 was a learner before entering the joint config, + // but it doesn't matter (we'll pretend that it wasn't). + // + // The code below will construct + // outgoing = add 1; add 2; add 4; add 6 + // incoming = remove 1; remove 2; remove 4; remove 6 + // add 1; add 2; add 3; + // add-learner 5; + // add-learner 4; + // + // So, when starting with an empty config, after applying 'outgoing' we have + // + // quorum=(1 2 4 6) + // + // From which we enter a joint state via 'incoming' + // + // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4) + // + // as desired. + + for _, id := range cs.VotersOutgoing { + // If there are outgoing voters, first add them one by one so that the + // (non-joint) config has them all. 
+ out = append(out, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + + } + + // We're done constructing the outgoing slice, now on to the incoming one + // (which will apply on top of the config created by the outgoing slice). + + // First, we'll remove all of the outgoing voters. + for _, id := range cs.VotersOutgoing { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeRemoveNode, + NodeID: id, + }) + } + // Then we'll add the incoming voters and learners. + for _, id := range cs.Voters { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + for _, id := range cs.Learners { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + // Same for LearnersNext; these are nodes we want to be learners but which + // are currently voters in the outgoing config. + for _, id := range cs.LearnersNext { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + return out, in +} + +func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { + for _, op := range ops { + cfg, prs, err := op(chg) + if err != nil { + return tracker.Config{}, nil, err + } + chg.Tracker.Config = cfg + chg.Tracker.Progress = prs + } + return chg.Tracker.Config, chg.Tracker.Progress, nil +} + +// Restore takes a Changer (which must represent an empty configuration), and +// runs a sequence of changes enacting the configuration described in the +// ConfState. +// +// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure +// the Changer only needs a ProgressMap (not a whole Tracker) at which point +// this can just take LastIndex and MaxInflight directly instead and cook up +// the results from that alone. +func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) { + outgoing, incoming := toConfChangeSingle(cs) + + var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error) + + if len(outgoing) == 0 { + // No outgoing config, so just apply the incoming changes one by one. + for _, cc := range incoming { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + } else { + // The ConfState describes a joint configuration. + // + // First, apply all of the changes of the outgoing config one by one, so + // that it temporarily becomes the incoming active config. For example, + // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&(). + for _, cc := range outgoing { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + // Now enter the joint state, which rotates the above additions into the + // outgoing config, and adds the incoming config in. Continuing the + // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations + // would be removing 2,3,4 and then adding in 1,2,3 while transitioning + // into a joint state. + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.EnterJoint(cs.AutoLeave, incoming...) + }) + } + + return chain(chg, ops...) 
+} diff --git a/vendor/go.etcd.io/etcd/raft/design.md b/vendor/go.etcd.io/etcd/raft/design.md new file mode 100644 index 000000000..7bc0531dc --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/design.md @@ -0,0 +1,57 @@ +## Progress + +Progress represents a follower’s progress in the view of the leader. Leader maintains progresses of all followers, and sends `replication message` to the follower based on its progress. + +`replication message` is a `msgApp` with log entries. + +A progress has two attribute: `match` and `next`. `match` is the index of the highest known matched entry. If leader knows nothing about follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. Leader puts entries from `next` to its latest one in next `replication message`. + +A progress is in one of the three state: `probe`, `replicate`, `snapshot`. + +``` + +--------------------------------------------------------+ + | send snapshot | + | | + +---------+----------+ +----------v---------+ + +---> probe | | snapshot | + | | max inflight = 1 <----------------------------------+ max inflight = 0 | + | +---------+----------+ +--------------------+ + | | 1. snapshot success + | | (next=snapshot.index + 1) + | | 2. snapshot failure + | | (no change) + | | 3. receives msgAppResp(rej=false&&index>lastsnap.index) + | | (match=m.index,next=match+1) +receives msgAppResp(rej=true) +(next=match+1)| | + | | + | | + | | receives msgAppResp(rej=false&&index>match) + | | (match=m.index,next=match+1) + | | + | | + | | + | +---------v----------+ + | | replicate | + +---+ max inflight = n | + +--------------------+ +``` + +When the progress of a follower is in `probe` state, leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly and probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`. + +When the progress of a follower is in `replicate` state, leader sends `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replicating log entries to the follower. + +When the progress of a follower is in `snapshot` state, leader stops sending any `replication message`. + +A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress. + +A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up with sending some duplicate entries when aggressively reset `next` too low. see open question) + +A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied. 
+ +### Flow Control + +1. limit the max size of message sent per message. Max should be configurable. +Lower the cost at probing state as we limit the size per message; lower the penalty when aggressively decreased to a too low `next` + +2. limit the # of in flight messages < N when in `replicate` state. N should be configurable. Most implementation will have a sending buffer on top of its actual network transport layer (not blocking raft node). We want to make sure raft does not overflow that buffer, which can cause message dropping and triggering a bunch of unnecessary resending repeatedly. diff --git a/vendor/go.etcd.io/etcd/raft/doc.go b/vendor/go.etcd.io/etcd/raft/doc.go new file mode 100644 index 000000000..68fe6f0a6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/doc.go @@ -0,0 +1,300 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the raftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/etcd-io/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. + // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. 
It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). + +Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialize the messages directly inside +your main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. The configuration change may be cancelled at this point +by setting the NodeID field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. +This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. + +Third, when you receive a message from another node, pass it to Node.Step: + + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } + +Finally, you need to call Node.Tick() at regular intervals (probably +via a time.Ticker). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". + +The total state machine handling loop will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.State, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } + +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: + + n.Propose(ctx, data) + +If the proposal is committed, data will appear in committed entries with type +raftpb.EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. + +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: + + n.ProposeConfChange(ctx, cc) + +After config change is committed, some committed entry with type +raftpb.EntryConfChange will be returned. You must apply it to node through: + + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. 
+This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +Implementation notes + +This implementation is up to date with the final Raft thesis +(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. + +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. + +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. +For this reason it is highly recommended to use three or more nodes in +every cluster. + +MessageType + +Package raft sends and receives message in Protocol Buffer format (defined +in raftpb package). Each state (follower, candidate, leader) implements its +own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when +advancing with the given raftpb.Message. Each step is determined by its +raftpb.MessageType. Note that every step is checked by one common method +'Step' that safety-checks the terms of node and incoming message to prevent +stale log entries: + + 'MsgHup' is used for election. If a node is a follower or candidate, the + 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or + candidate has not received any heartbeat before the election timeout, it + passes 'MsgHup' to its Step method and becomes (or remains) a candidate to + start a new election. + + 'MsgBeat' is an internal type that signals the leader to send a heartbeat of + the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in + the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to + send periodic 'MsgHeartbeat' messages to its followers. + + 'MsgProp' proposes to append data to its log entries. This is a special + type to redirect proposals to leader. Therefore, send method overwrites + raftpb.Message's term with its HardState's term to avoid attaching its + local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' + method, the leader first calls the 'appendEntry' method to append entries + to its log, and then calls 'bcastAppend' method to send those entries to + its peers. When passed to candidate, 'MsgProp' is dropped. When passed to + follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send + method. It is stored with sender's ID and later forwarded to leader by + rafthttp package. + + 'MsgApp' contains log entries to replicate. A leader calls bcastAppend, + which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' + type. When 'MsgApp' is passed to candidate's Step method, candidate reverts + back to follower, because it indicates that there is a valid leader sending + 'MsgApp' messages. 
Candidate and follower respond to this message in + 'MsgAppResp' type. + + 'MsgAppResp' is response to log replication request('MsgApp'). When + 'MsgApp' is passed to candidate or follower's Step method, it responds by + calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft + mailbox. + + 'MsgVote' requests votes for election. When a node is a follower or + candidate and 'MsgHup' is passed to its Step method, then the node calls + 'campaign' method to campaign itself to become a leader. Once 'campaign' + method is called, the node becomes candidate and sends 'MsgVote' to peers + in cluster to request votes. When passed to leader or candidate's Step + method and the message's Term is lower than leader's or candidate's, + 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true). + If leader or candidate receives 'MsgVote' with higher term, it will revert + back to follower. When 'MsgVote' is passed to follower, it votes for the + sender only when sender's last term is greater than MsgVote's term or + sender's last term is equal to MsgVote's term but sender's last committed + index is greater than or equal to follower's. + + 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is + passed to candidate, the candidate calculates how many votes it has won. If + it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. + If candidate receives majority of votes of denials, it reverts back to + follower. + + 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election + protocol. When Config.PreVote is true, a pre-election is carried out first + (using the same rules as a regular election), and no node increases its term + number unless the pre-election indicates that the campaigning node would win. + This minimizes disruption when a partitioned node rejoins the cluster. + + 'MsgSnap' requests to install a snapshot message. When a node has just + become a leader or the leader receives 'MsgProp' message, it calls + 'bcastAppend' method, which then calls 'sendAppend' method to each + follower. In 'sendAppend', if a leader fails to get term or entries, + the leader requests snapshot by sending 'MsgSnap' type message. + + 'MsgSnapStatus' tells the result of snapshot install message. When a + follower rejected 'MsgSnap', it indicates the snapshot request with + 'MsgSnap' had failed from network issues which causes the network layer + to fail to send out snapshots to its followers. Then leader considers + follower's progress as probe. When 'MsgSnap' were not rejected, it + indicates that the snapshot succeeded and the leader sets follower's + progress to probe and resumes its log replication. + + 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed + to candidate and message's term is higher than candidate's, the candidate + reverts back to follower and updates its committed index from the one in + this heartbeat. And it sends the message to its mailbox. When + 'MsgHeartbeat' is passed to follower's Step method and message's term is + higher than follower's, the follower updates its leaderID with the ID + from the message. + + 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp' + is passed to leader's Step method, the leader knows which follower + responded. And only when the leader's last committed index is greater than + follower's Match index, the leader runs 'sendAppend` method. + + 'MsgUnreachable' tells that request(message) wasn't delivered. 
When + 'MsgUnreachable' is passed to leader's Step method, the leader discovers + that the follower that sent this 'MsgUnreachable' is not reachable, often + indicating 'MsgApp' is lost. When follower's progress state is replicate, + the leader sets it back to probe. + +*/ +package raft diff --git a/vendor/go.etcd.io/etcd/raft/log.go b/vendor/go.etcd.io/etcd/raft/log.go new file mode 100644 index 000000000..77eedfccb --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/log.go @@ -0,0 +1,372 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "log" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + logger Logger + + // maxNextEntsSize is the maximum number aggregate byte size of the messages + // returned from calls to nextEnts. + maxNextEntsSize uint64 +} + +// newLog returns log using the given storage and default options. It +// recovers the log to the state that it just commits and applies the +// latest snapshot. +func newLog(storage Storage, logger Logger) *raftLog { + return newLogWithSize(storage, logger, noLimit) +} + +// newLogWithSize returns a log using the given storage and max +// message size. +func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + maxNextEntsSize: maxNextEntsSize, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true). 
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if l.matchTerm(index, logTerm) { + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true + } + return 0, false +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. +// It returns the first pair of conflicting entries between the existing +// entries and the given entries, if there are any. +// If there is no conflicting entries, and the existing entries contains +// all the given entries, zero will be returned. +// If there is no conflicting entries, but the given entries contains new +// entries, the index of the first new entry will be returned. +// An entry is considered to be conflicting if it has the same index but +// a different term. +// The first entry MUST have an index equal to the argument 'from'. +// The index of the given entries MUST be continuously increasing. +func (l *raftLog) findConflict(ents []pb.Entry) uint64 { + for _, ne := range ents { + if !l.matchTerm(ne.Index, ne.Term) { + if ne.Index <= l.lastIndex() { + l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", + ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) + } + return ne.Index + } + } + return 0 +} + +func (l *raftLog) unstableEntries() []pb.Entry { + if len(l.unstable.entries) == 0 { + return nil + } + return l.unstable.entries +} + +// nextEnts returns all the available entries for execution. +// If applied is smaller than the index of snapshot, it returns all committed +// entries after the index of snapshot. +func (l *raftLog) nextEnts() (ents []pb.Entry) { + off := max(l.applied+1, l.firstIndex()) + if l.committed+1 > off { + ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize) + if err != nil { + l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) + } + return ents + } + return nil +} + +// hasNextEnts returns if there is any available entries for execution. This +// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). +func (l *raftLog) hasNextEnts() bool { + off := max(l.applied+1, l.firstIndex()) + return l.committed+1 > off +} + +func (l *raftLog) snapshot() (pb.Snapshot, error) { + if l.unstable.snapshot != nil { + return *l.unstable.snapshot, nil + } + return l.storage.Snapshot() +} + +func (l *raftLog) firstIndex() uint64 { + if i, ok := l.unstable.maybeFirstIndex(); ok { + return i + } + index, err := l.storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return index +} + +func (l *raftLog) lastIndex() uint64 { + if i, ok := l.unstable.maybeLastIndex(); ok { + return i + } + i, err := l.storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return i +} + +func (l *raftLog) commitTo(tocommit uint64) { + // never decrease commit + if l.committed < tocommit { + if l.lastIndex() < tocommit { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. 
Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) + } + l.committed = tocommit + } +} + +func (l *raftLog) appliedTo(i uint64) { + if i == 0 { + return + } + if l.committed < i || i < l.applied { + l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) + } + l.applied = i +} + +func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } + +func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } + +func (l *raftLog) lastTerm() uint64 { + t, err := l.term(l.lastIndex()) + if err != nil { + l.logger.Panicf("unexpected error when getting the last term (%v)", err) + } + return t +} + +func (l *raftLog) term(i uint64) (uint64, error) { + // the valid term range is [index of dummy entry, last index] + dummyIndex := l.firstIndex() - 1 + if i < dummyIndex || i > l.lastIndex() { + // TODO: return an error instead? + return 0, nil + } + + if t, ok := l.unstable.maybeTerm(i); ok { + return t, nil + } + + t, err := l.storage.Term(i) + if err == nil { + return t, nil + } + if err == ErrCompacted || err == ErrUnavailable { + return 0, err + } + panic(err) // TODO(bdarnell) +} + +func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { + if i > l.lastIndex() { + return nil, nil + } + return l.slice(i, l.lastIndex()+1, maxsize) +} + +// allEntries returns all entries in the log. +func (l *raftLog) allEntries() []pb.Entry { + ents, err := l.entries(l.firstIndex(), noLimit) + if err == nil { + return ents + } + if err == ErrCompacted { // try again if there was a racing compaction + return l.allEntries() + } + // TODO (xiangli): handle error? + panic(err) +} + +// isUpToDate determines if the given (lastIndex,term) log is more up-to-date +// by comparing the index and term of the last entries in the existing logs. +// If the logs have last entries with different terms, then the log with the +// later term is more up-to-date. If the logs end with the same term, then +// whichever log has the larger lastIndex is more up-to-date. If the logs are +// the same, the given log is up-to-date. +func (l *raftLog) isUpToDate(lasti, term uint64) bool { + return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) +} + +func (l *raftLog) matchTerm(i, term uint64) bool { + t, err := l.term(i) + if err != nil { + return false + } + return t == term +} + +func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { + if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { + l.commitTo(maxIndex) + return true + } + return false +} + +func (l *raftLog) restore(s pb.Snapshot) { + l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) + l.committed = s.Metadata.Index + l.unstable.restore(s) +} + +// slice returns a slice of log entries from lo through hi-1, inclusive. 
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { + err := l.mustCheckOutOfBounds(lo, hi) + if err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + var ents []pb.Entry + if lo < l.unstable.offset { + storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) + } else if err != nil { + panic(err) // TODO(bdarnell) + } + + // check if ents has reached the size limitation + if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { + return storedEnts, nil + } + + ents = storedEnts + } + if hi > l.unstable.offset { + unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) + if len(ents) > 0 { + combined := make([]pb.Entry, len(ents)+len(unstable)) + n := copy(combined, ents) + copy(combined[n:], unstable) + ents = combined + } else { + ents = unstable + } + } + return limitSize(ents, maxSize), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if lo < fi || hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/go.etcd.io/etcd/raft/log_unstable.go b/vendor/go.etcd.io/etcd/raft/log_unstable.go new file mode 100644 index 000000000..1bff5a7bd --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/log_unstable.go @@ -0,0 +1,157 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "go.etcd.io/etcd/raft/raftpb" + +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + offset uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. 
+func (u *unstable) maybeLastIndex() (uint64, bool) { + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm returns the term of the entry at index i, if there +// is any. +func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + if i < u.offset { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + + last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + + return u.entries[i-u.offset].Term, true +} + +func (u *unstable) stableTo(i, t uint64) { + gt, ok := u.maybeTerm(i) + if !ok { + return + } + // if i < offset, term is matched with the snapshot + // only update the unstable entries if term is matched with + // an unstable entry. + if gt == t && i >= u.offset { + u.entries = u.entries[i+1-u.offset:] + u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + } +} + +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.entries = nil + u.snapshot = &s +} + +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + after := ents[0].Index + switch { + case after == u.offset+uint64(len(u.entries)): + // after is the next index in the u.entries + // directly append + u.entries = append(u.entries, ents...) + case after <= u.offset: + u.logger.Infof("replace the unstable entries from index %d", after) + // The log is being truncated to before our current offset + // portion, so set the offset and replace the entries + u.offset = after + u.entries = ents + default: + // truncate to after and copy to u.entries + // then append + u.logger.Infof("truncate the unstable entries before index %d", after) + u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) + u.entries = append(u.entries, ents...) 
+ } +} + +func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { + u.mustCheckOutOfBounds(lo, hi) + return u.entries[lo-u.offset : hi-u.offset] +} + +// u.offset <= lo <= hi <= u.offset+len(u.entries) +func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { + if lo > hi { + u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) + } + upper := u.offset + uint64(len(u.entries)) + if lo < u.offset || hi > upper { + u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) + } +} diff --git a/vendor/go.etcd.io/etcd/raft/logger.go b/vendor/go.etcd.io/etcd/raft/logger.go new file mode 100644 index 000000000..6d8962965 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/logger.go @@ -0,0 +1,132 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "sync" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func SetLogger(l Logger) { + raftLoggerMu.Lock() + raftLogger = l + raftLoggerMu.Unlock() +} + +var ( + defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} + discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + raftLoggerMu sync.Mutex + raftLogger = Logger(defaultLogger) +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. 
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) +} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/etcd/raft/node.go b/vendor/go.etcd.io/etcd/raft/node.go new file mode 100644 index 000000000..ab6185b99 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/node.go @@ -0,0 +1,584 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "context" + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +type SnapshotStatus int + +const ( + SnapshotFinish SnapshotStatus = 1 + SnapshotFailure SnapshotStatus = 2 +) + +var ( + emptyState = pb.HardState{} + + // ErrStopped is returned by methods on Nodes that have been stopped. + ErrStopped = errors.New("raft: stopped") +) + +// SoftState provides state that is useful for logging and debugging. +// The state is volatile and does not need to be persisted to the WAL. +type SoftState struct { + Lead uint64 // must use atomic operations to access; keep 64-bit aligned. + RaftState StateType +} + +func (a *SoftState) equal(b *SoftState) bool { + return a.Lead == b.Lead && a.RaftState == b.RaftState +} + +// Ready encapsulates the entries and messages that are ready to read, +// be saved to stable storage, committed or sent to other peers. +// All fields in Ready are read-only. 
+type Ready struct { + // The current volatile state of a Node. + // SoftState will be nil if there is no update. + // It is not required to consume or store SoftState. + *SoftState + + // The current state of a Node to be saved to stable storage BEFORE + // Messages are sent. + // HardState will be equal to empty state if there is no update. + pb.HardState + + // ReadStates can be used for node to serve linearizable read requests locally + // when its applied index is greater than the index in ReadState. + // Note that the readState will be returned when raft receives msgReadIndex. + // The returned is only valid for the request that requested to read. + ReadStates []ReadState + + // Entries specifies entries to be saved to stable storage BEFORE + // Messages are sent. + Entries []pb.Entry + + // Snapshot specifies the snapshot to be saved to stable storage. + Snapshot pb.Snapshot + + // CommittedEntries specifies entries to be committed to a + // store/state-machine. These have previously been committed to stable + // store. + CommittedEntries []pb.Entry + + // Messages specifies outbound messages to be sent AFTER Entries are + // committed to stable storage. + // If it contains a MsgSnap message, the application MUST report back to raft + // when the snapshot has been received or has failed by calling ReportSnapshot. + Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. + MustSync bool +} + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} + +// IsEmptyHardState returns true if the given HardState is empty. +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, emptyState) +} + +// IsEmptySnap returns true if the given Snapshot is empty. +func IsEmptySnap(sp pb.Snapshot) bool { + return sp.Metadata.Index == 0 +} + +func (rd Ready) containsUpdates() bool { + return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || + !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || + len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 +} + +// appliedCursor extracts from the Ready the highest index the client has +// applied (once the Ready is confirmed via Advance). If no information is +// contained in the Ready, returns zero. +func (rd Ready) appliedCursor() uint64 { + if n := len(rd.CommittedEntries); n > 0 { + return rd.CommittedEntries[n-1].Index + } + if index := rd.Snapshot.Metadata.Index; index > 0 { + return index + } + return 0 +} + +// Node represents a node in a raft cluster. +type Node interface { + // Tick increments the internal logical clock for the Node by a single tick. Election + // timeouts and heartbeat timeouts are in units of ticks. + Tick() + // Campaign causes the Node to transition to candidate state and start campaigning to become leader. + Campaign(ctx context.Context) error + // Propose proposes that data be appended to the log. Note that proposals can be lost without + // notice, therefore it is user's job to ensure proposal retries. + Propose(ctx context.Context, data []byte) error + // ProposeConfChange proposes a configuration change. Like any proposal, the + // configuration change may be dropped with or without an error being + // returned. In particular, configuration changes are dropped unless the + // leader has certainty that there is no prior unapplied configuration + // change in its log. 
+ // + // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2 + // message. The latter allows arbitrary configuration changes via joint + // consensus, notably including replacing a voter. Passing a ConfChangeV2 + // message is only allowed if all Nodes participating in the cluster run a + // version of this library aware of the V2 API. See pb.ConfChangeV2 for + // usage details and semantics. + ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error + + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. + Step(ctx context.Context, msg pb.Message) error + + // Ready returns a channel that returns the current point-in-time state. + // Users of the Node must call Advance after retrieving the state returned by Ready. + // + // NOTE: No committed entries from the next Ready may be applied until all committed entries + // and snapshots from the previous one have finished. + Ready() <-chan Ready + + // Advance notifies the Node that the application has saved progress up to the last Ready. + // It prepares the node to return the next available Ready. + // + // The application should generally call Advance after it applies the entries in last Ready. + // + // However, as an optimization, the application may call Advance while it is applying the + // commands. For example. when the last Ready contains a snapshot, the application might take + // a long time to apply the snapshot data. To continue receiving Ready without blocking raft + // progress, it can call Advance before finishing applying the last ready. + Advance() + // ApplyConfChange applies a config change (previously passed to + // ProposeConfChange) to the node. This must be called whenever a config + // change is observed in Ready.CommittedEntries. + // + // Returns an opaque non-nil ConfState protobuf which must be recorded in + // snapshots. + ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState + + // TransferLeadership attempts to transfer leadership to the given transferee. + TransferLeadership(ctx context.Context, lead, transferee uint64) + + // ReadIndex request a read state. The read state will be set in the ready. + // Read state has a read index. Once the application advances further than the read + // index, any linearizable read requests issued before the read request can be + // processed safely. The read state will have the same rctx attached. + ReadIndex(ctx context.Context, rctx []byte) error + + // Status returns the current status of the raft state machine. + Status() Status + // ReportUnreachable reports the given node is not reachable for the last send. + ReportUnreachable(id uint64) + // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower + // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure. + // Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a + // snapshot (for e.g., while streaming it from leader to follower), should be reported to the + // leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft + // log probes until the follower can apply the snapshot and advance its state. If the follower + // can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any + // updates from the leader. 
Therefore, it is crucial that the application ensures that any + // failure in snapshot sending is caught and reported back to the leader; so it can resume raft + // log probing in the follower. + ReportSnapshot(id uint64, status SnapshotStatus) + // Stop performs any necessary termination of the Node. + Stop() +} + +type Peer struct { + ID uint64 + Context []byte +} + +// StartNode returns a new Node given configuration and a list of raft peers. +// It appends a ConfChangeAddNode entry for each given peer to the initial log. +// +// Peers must not be zero length; call RestartNode in that case. +func StartNode(c *Config, peers []Peer) Node { + if len(peers) == 0 { + panic("no peers given; use RestartNode instead") + } + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + rn.Bootstrap(peers) + + n := newNode(rn) + + go n.run() + return &n +} + +// RestartNode is similar to StartNode but does not take a list of peers. +// The current membership of the cluster will be restored from the Storage. +// If the caller has an existing state machine, pass in the last log index that +// has been applied to it; otherwise use zero. +func RestartNode(c *Config) Node { + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + n := newNode(rn) + go n.run() + return &n +} + +type msgWithResult struct { + m pb.Message + result chan error +} + +// node is the canonical implementation of the Node interface +type node struct { + propc chan msgWithResult + recvc chan pb.Message + confc chan pb.ConfChangeV2 + confstatec chan pb.ConfState + readyc chan Ready + advancec chan struct{} + tickc chan struct{} + done chan struct{} + stop chan struct{} + status chan chan Status + + rn *RawNode +} + +func newNode(rn *RawNode) node { + return node{ + propc: make(chan msgWithResult), + recvc: make(chan pb.Message), + confc: make(chan pb.ConfChangeV2), + confstatec: make(chan pb.ConfState), + readyc: make(chan Ready), + advancec: make(chan struct{}), + // make tickc a buffered chan, so raft node can buffer some ticks when the node + // is busy processing raft messages. Raft node will resume process buffered + // ticks when it becomes idle. + tickc: make(chan struct{}, 128), + done: make(chan struct{}), + stop: make(chan struct{}), + status: make(chan chan Status), + rn: rn, + } +} + +func (n *node) Stop() { + select { + case n.stop <- struct{}{}: + // Not already stopped, so trigger it + case <-n.done: + // Node has already been stopped - no need to do anything + return + } + // Block until the stop has been acknowledged by run() + <-n.done +} + +func (n *node) run() { + var propc chan msgWithResult + var readyc chan Ready + var advancec chan struct{} + var rd Ready + + r := n.rn.raft + + lead := None + + for { + if advancec != nil { + readyc = nil + } else if n.rn.HasReady() { + // Populate a Ready. Note that this Ready is not guaranteed to + // actually be handled. We will arm readyc, but there's no guarantee + // that we will actually send on it. It's possible that we will + // service another channel instead, loop around, and then populate + // the Ready again. We could instead force the previous Ready to be + // handled first, but it's generally good to emit larger Readys plus + // it simplifies testing (by emitting less frequently and more + // predictably). 
+ rd = n.rn.readyWithoutAccept() + readyc = n.readyc + } + + if lead != r.lead { + if r.hasLeader() { + if lead == None { + r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) + } else { + r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) + } + propc = n.propc + } else { + r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) + propc = nil + } + lead = r.lead + } + + select { + // TODO: maybe buffer the config propose if there exists one (the way + // described in raft dissertation) + // Currently it is dropped in Step silently. + case pm := <-propc: + m := pm.m + m.From = r.id + err := r.Step(m) + if pm.result != nil { + pm.result <- err + close(pm.result) + } + case m := <-n.recvc: + // filter out response message from unknown From. + if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + r.Step(m) + } + case cc := <-n.confc: + _, okBefore := r.prs.Progress[r.id] + cs := r.applyConfChange(cc) + // If the node was removed, block incoming proposals. Note that we + // only do this if the node was in the config before. Nodes may be + // a member of the group without knowing this (when they're catching + // up on the log and don't have the latest config) and we don't want + // to block the proposal channel in that case. + // + // NB: propc is reset when the leader changes, which, if we learn + // about it, sort of implies that we got readded, maybe? This isn't + // very sound and likely has bugs. + if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter { + var found bool + for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} { + for _, id := range sl { + if id == r.id { + found = true + } + } + } + if !found { + propc = nil + } + } + select { + case n.confstatec <- cs: + case <-n.done: + } + case <-n.tickc: + n.rn.Tick() + case readyc <- rd: + n.rn.acceptReady(rd) + advancec = n.advancec + case <-advancec: + n.rn.Advance(rd) + rd = Ready{} + advancec = nil + case c := <-n.status: + c <- getStatus(r) + case <-n.stop: + close(n.done) + return + } + } +} + +// Tick increments the internal logical clock for this Node. Election timeouts +// and heartbeat timeouts are in units of ticks. +func (n *node) Tick() { + select { + case n.tickc <- struct{}{}: + case <-n.done: + default: + n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead) + } +} + +func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } + +func (n *node) Propose(ctx context.Context, data []byte) error { + return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) +} + +func (n *node) Step(ctx context.Context, m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + // TODO: return an error? 
+ return nil + } + return n.step(ctx, m) +} + +func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) { + typ, data, err := pb.MarshalConfChange(c) + if err != nil { + return pb.Message{}, err + } + return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error { + msg, err := confChangeToMsg(cc) + if err != nil { + return err + } + return n.Step(ctx, msg) +} + +func (n *node) step(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, false) +} + +func (n *node) stepWait(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, true) +} + +// Step advances the state machine using msgs. The ctx.Err() will be returned, +// if any. +func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error { + if m.Type != pb.MsgProp { + select { + case n.recvc <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + } + ch := n.propc + pm := msgWithResult{m: m} + if wait { + pm.result = make(chan error, 1) + } + select { + case ch <- pm: + if !wait { + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + select { + case err := <-pm.result: + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + return nil +} + +func (n *node) Ready() <-chan Ready { return n.readyc } + +func (n *node) Advance() { + select { + case n.advancec <- struct{}{}: + case <-n.done: + } +} + +func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + var cs pb.ConfState + select { + case n.confc <- cc.AsV2(): + case <-n.done: + } + select { + case cs = <-n.confstatec: + case <-n.done: + } + return &cs +} + +func (n *node) Status() Status { + c := make(chan Status) + select { + case n.status <- c: + return <-c + case <-n.done: + return Status{} + } +} + +func (n *node) ReportUnreachable(id uint64) { + select { + case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: + case <-n.done: + } +} + +func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + select { + case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: + case <-n.done: + } +} + +func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { + select { + // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership + case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: + case <-n.done: + case <-ctx.Done(): + } +} + +func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} + +func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { + rd := Ready{ + Entries: r.raftLog.unstableEntries(), + CommittedEntries: r.raftLog.nextEnts(), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(prevSoftSt) { + rd.SoftState = softSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.unstable.snapshot != nil { + rd.Snapshot = *r.raftLog.unstable.snapshot + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries)) + return rd +} + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a 
synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/joint.go b/vendor/go.etcd.io/etcd/raft/quorum/joint.go new file mode 100644 index 000000000..e3741e0b0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/joint.go @@ -0,0 +1,75 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +// JointConfig is a configuration of two groups of (possibly overlapping) +// majority configurations. Decisions require the support of both majorities. +type JointConfig [2]MajorityConfig + +func (c JointConfig) String() string { + if len(c[1]) > 0 { + return c[0].String() + "&&" + c[1].String() + } + return c[0].String() +} + +// IDs returns a newly initialized map representing the set of voters present +// in the joint configuration. +func (c JointConfig) IDs() map[uint64]struct{} { + m := map[uint64]struct{}{} + for _, cc := range c { + for id := range cc { + m[id] = struct{}{} + } + } + return m +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c JointConfig) Describe(l AckedIndexer) string { + return MajorityConfig(c.IDs()).Describe(l) +} + +// CommittedIndex returns the largest committed index for the given joint +// quorum. An index is jointly committed if it is committed in both constituent +// majorities. +func (c JointConfig) CommittedIndex(l AckedIndexer) Index { + idx0 := c[0].CommittedIndex(l) + idx1 := c[1].CommittedIndex(l) + if idx0 < idx1 { + return idx0 + } + return idx1 +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending, lost, or won. A joint quorum +// requires both majority quorums to vote in favor. +func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult { + r1 := c[0].VoteResult(votes) + r2 := c[1].VoteResult(votes) + + if r1 == r2 { + // If they agree, return the agreed state. + return r1 + } + if r1 == VoteLost || r2 == VoteLost { + // If either config has lost, loss is the only possible outcome. + return VoteLost + } + // One side won, the other one is pending, so the whole outcome is. + return VotePending +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/majority.go b/vendor/go.etcd.io/etcd/raft/quorum/majority.go new file mode 100644 index 000000000..8858a36b6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/majority.go @@ -0,0 +1,210 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "fmt" + "math" + "sort" + "strings" +) + +// MajorityConfig is a set of IDs that uses majority quorums to make decisions. +type MajorityConfig map[uint64]struct{} + +func (c MajorityConfig) String() string { + sl := make([]uint64, 0, len(c)) + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + var buf strings.Builder + buf.WriteByte('(') + for i := range sl { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprint(&buf, sl[i]) + } + buf.WriteByte(')') + return buf.String() +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c MajorityConfig) Describe(l AckedIndexer) string { + if len(c) == 0 { + return "" + } + type tup struct { + id uint64 + idx Index + ok bool // idx found? + bar int // length of bar displayed for this tup + } + + // Below, populate .bar so that the i-th largest commit index has bar i (we + // plot this as sort of a progress bar). The actual code is a bit more + // complicated and also makes sure that equal index => equal bar. + + n := len(c) + info := make([]tup, 0, n) + for id := range c { + idx, ok := l.AckedIndex(id) + info = append(info, tup{id: id, idx: idx, ok: ok}) + } + + // Sort by index + sort.Slice(info, func(i, j int) bool { + if info[i].idx == info[j].idx { + return info[i].id < info[j].id + } + return info[i].idx < info[j].idx + }) + + // Populate .bar. + for i := range info { + if i > 0 && info[i-1].idx < info[i].idx { + info[i].bar = i + } + } + + // Sort by ID. + sort.Slice(info, func(i, j int) bool { + return info[i].id < info[j].id + }) + + var buf strings.Builder + + // Print. + fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n") + for i := range info { + bar := info[i].bar + if !info[i].ok { + fmt.Fprint(&buf, "?"+strings.Repeat(" ", n)) + } else { + fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar)) + } + fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id) + } + return buf.String() +} + +// Slice returns the MajorityConfig as a sorted slice. +func (c MajorityConfig) Slice() []uint64 { + var sl []uint64 + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + return sl +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// CommittedIndex computes the committed index from those supplied via the +// provided AckedIndexer (for the active config). +func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { + n := len(c) + if n == 0 { + // This plays well with joint quorums which, when one half is the zero + // MajorityConfig, should behave like the other half. + return math.MaxUint64 + } + + // Use an on-stack slice to collect the committed indexes when n <= 7 + // (otherwise we alloc). The alternative is to stash a slice on + // MajorityConfig, but this impairs usability (as is, MajorityConfig is just + // a map, and that's nice). 
The assumption is that running with a + // replication factor of >7 is rare, and in cases in which it happens + // performance is a lesser concern (additionally the performance + // implications of an allocation here are far from drastic). + var stk [7]uint64 + var srt []uint64 + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]uint64, n) + } + + { + // Fill the slice with the indexes observed. Any unused slots will be + // left as zero; these correspond to voters that may report in, but + // haven't yet. We fill from the right (since the zeroes will end up on + // the left after sorting below anyway). + i := n - 1 + for id := range c { + if idx, ok := l.AckedIndex(id); ok { + srt[i] = uint64(idx) + i-- + } + } + } + + // Sort by index. Use a bespoke algorithm (copied from the stdlib's sort + // package) to keep srt on the stack. + insertionSort(srt) + + // The smallest index into the array for which the value is acked by a + // quorum. In other words, from the end of the slice, move n/2+1 to the + // left (accounting for zero-indexing). + pos := n - (n/2 + 1) + return Index(srt[pos]) +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending (i.e. neither a quorum of +// yes/no has been reached), won (a quorum of yes has been reached), or lost (a +// quorum of no has been reached). +func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult { + if len(c) == 0 { + // By convention, the elections on an empty config win. This comes in + // handy with joint quorums because it'll make a half-populated joint + // quorum behave like a majority quorum. + return VoteWon + } + + ny := [2]int{} // vote counts for no and yes, respectively + + var missing int + for id := range c { + v, ok := votes[id] + if !ok { + missing++ + continue + } + if v { + ny[1]++ + } else { + ny[0]++ + } + } + + q := len(c)/2 + 1 + if ny[1] >= q { + return VoteWon + } + if ny[1]+missing >= q { + return VotePending + } + return VoteLost +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/quorum.go b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go new file mode 100644 index 000000000..2899e46c9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go @@ -0,0 +1,58 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "math" + "strconv" +) + +// Index is a Raft log position. +type Index uint64 + +func (i Index) String() string { + if i == math.MaxUint64 { + return "∞" + } + return strconv.FormatUint(uint64(i), 10) +} + +// AckedIndexer allows looking up a commit index for a given ID of a voter +// from a corresponding MajorityConfig. +type AckedIndexer interface { + AckedIndex(voterID uint64) (idx Index, found bool) +} + +type mapAckIndexer map[uint64]Index + +func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) { + idx, ok := m[id] + return idx, ok +} + +// VoteResult indicates the outcome of a vote. 
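+//
+// As an illustrative sketch, a three-voter MajorityConfig needs two "yes"
+// votes to win:
+//
+//	c := MajorityConfig{1: {}, 2: {}, 3: {}}
+//	c.VoteResult(map[uint64]bool{1: true})            // VotePending
+//	c.VoteResult(map[uint64]bool{1: true, 2: true})   // VoteWon
+//	c.VoteResult(map[uint64]bool{1: false, 2: false}) // VoteLost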
+// +//go:generate stringer -type=VoteResult +type VoteResult uint8 + +const ( + // VotePending indicates that the decision of the vote depends on future + // votes, i.e. neither "yes" or "no" has reached quorum yet. + VotePending VoteResult = 1 + iota + // VoteLost indicates that the quorum has voted "no". + VoteLost + // VoteWon indicates that the quorum has voted "yes". + VoteWon +) diff --git a/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go new file mode 100644 index 000000000..9eca8fd0c --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=VoteResult"; DO NOT EDIT. + +package quorum + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VotePending-1] + _ = x[VoteLost-2] + _ = x[VoteWon-3] +} + +const _VoteResult_name = "VotePendingVoteLostVoteWon" + +var _VoteResult_index = [...]uint8{0, 11, 19, 26} + +func (i VoteResult) String() string { + i -= 1 + if i >= VoteResult(len(_VoteResult_index)-1) { + return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]] +} diff --git a/vendor/go.etcd.io/etcd/raft/raft.go b/vendor/go.etcd.io/etcd/raft/raft.go new file mode 100644 index 000000000..d3c3f4257 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raft.go @@ -0,0 +1,1656 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + "go.etcd.io/etcd/raft/confchange" + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// None is a placeholder node ID used when there is no leader. +const None uint64 = 0 +const noLimit = math.MaxUint64 + +// Possible values for StateType. +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate + numStates +) + +type ReadOnlyOption int + +const ( + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + ReadOnlySafe ReadOnlyOption = iota + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyLeaseBased +) + +// Possible values for CampaignType +const ( + // campaignPreElection represents the first phase of a normal election when + // Config.PreVote is true. 
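+	// A pre-candidate asks its peers whether they would grant their vote
+	// without incrementing its own term, so a partitioned node cannot
+	// disrupt the cluster by repeatedly bumping terms.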
+ campaignPreElection CampaignType = "CampaignPreElection" + // campaignElection represents a normal (time-based) election (the second phase + // of the election when Config.PreVote is true). + campaignElection CampaignType = "CampaignElection" + // campaignTransfer represents the type of leader transfer + campaignTransfer CampaignType = "CampaignTransfer" +) + +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + +// lockedRand is a small wrapper around rand.Rand to provide +// synchronization among multiple raft groups. Only the methods needed +// by the code are exposed (e.g. Intn). +type lockedRand struct { + mu sync.Mutex + rand *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + v := r.rand.Intn(n) + r.mu.Unlock() + return v +} + +var globalRand = &lockedRand{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// CampaignType represents the type of campaigning +// the reason we use the type of string instead of uint64 +// is because it's simpler to compare and fill in raft entries +type CampaignType string + +// StateType represents the role of a node in a cluster. +type StateType uint64 + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", + "StatePreCandidate", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// Config contains the parameters to start a raft. +type Config struct { + // ID is the identity of the local raft. ID cannot be 0. + ID uint64 + + // peers contains the IDs of all nodes (including self) in the raft cluster. It + // should only be set when starting a new raft cluster. Restarting raft from + // previous configuration will panic if peers is set. peer is private and only + // used for testing right now. + peers []uint64 + + // learners contains the IDs of all learner nodes (including self if the + // local node is a learner) in the raft cluster. learners only receives + // entries from the leader node. It does not vote or promote itself. + learners []uint64 + + // ElectionTick is the number of Node.Tick invocations that must pass between + // elections. That is, if a follower does not receive any message from the + // leader of current term before ElectionTick has elapsed, it will become + // candidate and start an election. ElectionTick must be greater than + // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid + // unnecessary leader switching. + ElectionTick int + // HeartbeatTick is the number of Node.Tick invocations that must pass between + // heartbeats. That is, a leader sends heartbeat messages to maintain its + // leadership every HeartbeatTick ticks. + HeartbeatTick int + + // Storage is the storage for raft. raft generates entries and states to be + // stored in storage. raft reads the persisted entries and states out of + // Storage when it needs. raft reads out the previous state and configuration + // out of storage when restarting. + Storage Storage + // Applied is the last applied index. It should only be set when restarting + // raft. raft will not return entries to the application smaller or equal to + // Applied. If Applied is unset when restarting, raft might return previous + // applied entries. This is a very application dependent configuration. + Applied uint64 + + // MaxSizePerMsg limits the max byte size of each append message. 
Smaller + // value lowers the raft recovery cost(initial probing and message lost + // during normal operation). On the other side, it might affect the + // throughput during normal replication. Note: math.MaxUint64 for unlimited, + // 0 for at most one entry per message. + MaxSizePerMsg uint64 + // MaxCommittedSizePerReady limits the size of the committed entries which + // can be applied. + MaxCommittedSizePerReady uint64 + // MaxUncommittedEntriesSize limits the aggregate byte size of the + // uncommitted entries that may be appended to a leader's log. Once this + // limit is exceeded, proposals will begin to return ErrProposalDropped + // errors. Note: 0 for no limit. + MaxUncommittedEntriesSize uint64 + // MaxInflightMsgs limits the max number of in-flight append messages during + // optimistic replication phase. The application transportation layer usually + // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid + // overflowing that sending buffer. TODO (xiangli): feedback to application to + // limit the proposal rate? + MaxInflightMsgs int + + // CheckQuorum specifies if the leader should check quorum activity. Leader + // steps down when quorum is not active for an electionTimeout. + CheckQuorum bool + + // PreVote enables the Pre-Vote algorithm described in raft thesis section + // 9.6. This prevents disruption when a node that has been partitioned away + // rejoins the cluster. + PreVote bool + + // ReadOnlyOption specifies how the read only request is processed. + // + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + // + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased. + ReadOnlyOption ReadOnlyOption + + // Logger is the logger used for raft log. For multinode which can host + // multiple raft group, each raft group can have its own logger + Logger Logger + + // DisableProposalForwarding set to true means that followers will drop + // proposals, rather than forwarding them to the leader. One use case for + // this feature would be in a situation where the Raft leader is used to + // compute the data of a proposal, for example, adding a timestamp from a + // hybrid logical clock to data in a monotonically increasing way. Forwarding + // should be disabled to prevent a follower with an inaccurate hybrid + // logical clock from assigning the timestamp and then forwarding the data + // to the leader. + DisableProposalForwarding bool +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + + // default MaxCommittedSizePerReady to MaxSizePerMsg because they were + // previously the same parameter. 
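+	// Like MaxUncommittedEntriesSize above, this default lets callers leave
+	// the field zero.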
+ if c.MaxCommittedSizePerReady == 0 { + c.MaxCommittedSizePerReady = c.MaxSizePerMsg + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages must be greater than 0") + } + + if c.Logger == nil { + c.Logger = raftLogger + } + + if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum { + return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased") + } + + return nil +} + +type raft struct { + id uint64 + + Term uint64 + Vote uint64 + + readStates []ReadState + + // the log + raftLog *raftLog + + maxMsgSize uint64 + maxUncommittedSize uint64 + // TODO(tbg): rename to trk. + prs tracker.ProgressTracker + + state StateType + + // isLearner is true if the local raft node is a learner. + isLearner bool + + msgs []pb.Message + + // the leader id + lead uint64 + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via pendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + pendingConfIndex uint64 + // an estimate of the size of the uncommitted tail of the Raft log. Used to + // prevent unbounded log growth. Only maintained by the leader. Reset on + // term changes. + uncommittedSize uint64 + + readOnly *readOnly + + // number of ticks since it reached last electionTimeout when it is leader + // or candidate. + // number of ticks since it reached last electionTimeout or received a + // valid message from current leader when it is a follower. + electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int + + checkQuorum bool + preVote bool + + heartbeatTimeout int + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. It gets reset + // when raft changes its state to follower or candidate. + randomizedElectionTimeout int + disableProposalForwarding bool + + tick func() + step stepFunc + + logger Logger +} + +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) // TODO(bdarnell) + } + + if len(c.peers) > 0 || len(c.learners) > 0 { + if len(cs.Voters) > 0 || len(cs.Learners) > 0 { + // TODO(bdarnell): the peers argument is always nil except in + // tests; the argument should be removed and these tests should be + // updated to specify their nodes through a snapshot. 
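+			// In other words, the bootstrap configuration must come from
+			// exactly one place: either the (test-only) peers/learners fields
+			// or the ConfState persisted in Storage, never both.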
+ panic("cannot specify both newRaft(peers, learners) and ConfState.(Voters, Learners)") + } + cs.Voters = c.peers + cs.Learners = c.learners + } + + r := &raft{ + id: c.ID, + lead: None, + isLearner: false, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxUncommittedSize: c.MaxUncommittedEntriesSize, + prs: tracker.MakeProgressTracker(c.MaxInflightMsgs), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + disableProposalForwarding: c.DisableProposalForwarding, + } + + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: raftlog.lastIndex(), + }, cs) + if err != nil { + panic(err) + } + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + if !IsEmptyHardState(hs) { + r.loadState(hs) + } + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) + } + r.becomeFollower(r.Term, None) + + var nodesStrs []string + for _, n := range r.prs.VoterNodes() { + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, + Commit: r.raftLog.committed, + } +} + +// send persists state to stable storage and then sends to its mailbox. +func (r *raft) send(m pb.Message) { + m.From = r.id + if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp { + if m.Term == 0 { + // All {pre-,}campaign messages need to have the term set when + // sending. + // - MsgVote: m.Term is the term the node is campaigning for, + // non-zero as we increment the term when campaigning. + // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was + // granted, non-zero for the same reason MsgVote is + // - MsgPreVote: m.Term is the term the node will campaign, + // non-zero as we use m.Term to indicate the next term we'll be + // campaigning for + // - MsgPreVoteResp: m.Term is the term received in the original + // MsgPreVote if the pre-vote was granted, non-zero for the + // same reasons MsgPreVote is + panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + } + } else { + if m.Term != 0 { + panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + } + // do not attach term to MsgProp, MsgReadIndex + // proposals are a way to forward to the leader and + // should be treated as local message. + // MsgReadIndex is also forwarded to leader. + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + r.msgs = append(r.msgs, m) +} + +// sendAppend sends an append RPC with new entries (if any) and the +// current commit index to the given peer. +func (r *raft) sendAppend(to uint64) { + r.maybeSendAppend(to, true) +} + +// maybeSendAppend sends an append RPC with new entries to the given peer, +// if necessary. Returns true if a message was sent. 
The sendIfEmpty +// argument controls whether messages with no entries will be sent +// ("empty" messages are useful to convey updated Commit indexes, but +// are undesirable when we're sending multiple messages in a batch). +func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { + pr := r.prs.Progress[to] + if pr.IsPaused() { + return false + } + m := pb.Message{} + m.To = to + + term, errt := r.raftLog.term(pr.Next - 1) + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + if len(ents) == 0 && !sendIfEmpty { + return false + } + + if errt != nil || erre != nil { // send snapshot if we failed to get term or entries + if !pr.RecentActive { + r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) + return false + } + + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) + return false + } + panic(err) // TODO(bdarnell) + } + if IsEmptySnap(snapshot) { + panic("need non-empty snapshot") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", + r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.BecomeSnapshot(sindex) + r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) + } else { + m.Type = pb.MsgApp + m.Index = pr.Next - 1 + m.LogTerm = term + m.Entries = ents + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // optimistically increase the next when in StateReplicate + case tracker.StateReplicate: + last := m.Entries[n-1].Index + pr.OptimisticUpdate(last) + pr.Inflights.Add(last) + case tracker.StateProbe: + pr.ProbeSent = true + default: + r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) + } + } + } + r.send(m) + return true +} + +// sendHeartbeat sends a heartbeat RPC to the given peer. +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + // Attach the commit as min(to.matched, r.committed). + // When the leader sends out heartbeat message, + // the receiver(follower) might not be matched with the leader + // or it might not have all the committed entries. + // The leader MUST NOT forward the follower's commit to + // an unmatched index. + commit := min(r.prs.Progress[to].Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, + Context: ctx, + } + + r.send(m) +} + +// bcastAppend sends RPC, with entries to all peers that are not up-to-date +// according to the progress recorded in r.prs. +func (r *raft) bcastAppend() { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + return + } + r.sendAppend(id) + }) +} + +// bcastHeartbeat sends RPC, without entries to all the peers. +func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + return + } + r.sendHeartbeat(id, ctx) + }) +} + +func (r *raft) advance(rd Ready) { + // If entries were applied (or a snapshot), update our cursor for + // the next Ready. 
Note that if the current HardState contains a + // new Commit index, this does not mean that we're also applying + // all of the new entries due to commit pagination by size. + if index := rd.appliedCursor(); index > 0 { + r.raftLog.appliedTo(index) + if r.prs.Config.AutoLeave && index >= r.pendingConfIndex && r.state == StateLeader { + // If the current (and most recent, at least for this leader's term) + // configuration should be auto-left, initiate that now. + ccdata, err := (&pb.ConfChangeV2{}).Marshal() + if err != nil { + panic(err) + } + ent := pb.Entry{ + Type: pb.EntryConfChangeV2, + Data: ccdata, + } + if !r.appendEntry(ent) { + // If we could not append the entry, bump the pending conf index + // so that we'll try again later. + // + // TODO(tbg): test this case. + r.pendingConfIndex = r.raftLog.lastIndex() + } else { + r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config) + } + } + } + r.reduceUncommittedSize(rd.CommittedEntries) + + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + r.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } +} + +// maybeCommit attempts to advance the commit index. Returns true if +// the commit index changed (in which case the caller should call +// r.bcastAppend). +func (r *raft) maybeCommit() bool { + mci := r.prs.Committed() + return r.raftLog.maybeCommit(mci, r.Term) +} + +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() + + r.abortLeaderTransfer() + + r.prs.ResetVotes() + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + *pr = tracker.Progress{ + Match: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.prs.MaxInflight), + IsLearner: pr.IsLearner, + } + if id == r.id { + pr.Match = r.raftLog.lastIndex() + } + }) + + r.pendingConfIndex = 0 + r.uncommittedSize = 0 + r.readOnly = newReadOnly(r.readOnly.option) +} + +func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { + li := r.raftLog.lastIndex() + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + // Track the size of this uncommitted proposal. + if !r.increaseUncommittedSize(es) { + r.logger.Debugf( + "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal", + r.id, + ) + // Drop the proposal. + return false + } + // use latest "last" index after truncate/append + li = r.raftLog.append(es...) + r.prs.Progress[r.id].MaybeUpdate(li) + // Regardless of maybeCommit's return, our caller will call bcastAppend. + r.maybeCommit() + return true +} + +// tickElection is run by followers and candidates after r.electionTimeout. +func (r *raft) tickElection() { + r.electionElapsed++ + + if r.promotable() && r.pastElectionTimeout() { + r.electionElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + } +} + +// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. +func (r *raft) tickHeartbeat() { + r.heartbeatElapsed++ + r.electionElapsed++ + + if r.electionElapsed >= r.electionTimeout { + r.electionElapsed = 0 + if r.checkQuorum { + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. 
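+		// (abortLeaderTransfer only clears r.leadTransferee; the node never
+		// stopped being leader while the transfer was pending, it merely
+		// resumes accepting proposals.)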
+ if r.state == StateLeader && r.leadTransferee != None { + r.abortLeaderTransfer() + } + } + + if r.state != StateLeader { + return + } + + if r.heartbeatElapsed >= r.heartbeatTimeout { + r.heartbeatElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + } +} + +func (r *raft) becomeFollower(term uint64, lead uint64) { + r.step = stepFollower + r.reset(term) + r.tick = r.tickElection + r.lead = lead + r.state = StateFollower + r.logger.Infof("%x became follower at term %d", r.id, r.Term) +} + +func (r *raft) becomeCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> candidate]") + } + r.step = stepCandidate + r.reset(r.Term + 1) + r.tick = r.tickElection + r.Vote = r.id + r.state = StateCandidate + r.logger.Infof("%x became candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomePreCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> pre-candidate]") + } + // Becoming a pre-candidate changes our step functions and state, + // but doesn't change anything else. In particular it does not increase + // r.Term or change r.Vote. + r.step = stepCandidate + r.prs.ResetVotes() + r.tick = r.tickElection + r.lead = None + r.state = StatePreCandidate + r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomeLeader() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateFollower { + panic("invalid transition [follower -> leader]") + } + r.step = stepLeader + r.reset(r.Term) + r.tick = r.tickHeartbeat + r.lead = r.id + r.state = StateLeader + // Followers enter replicate mode when they've been successfully probed + // (perhaps after having received a snapshot as a result). The leader is + // trivially in this state. Note that r.reset() has initialized this + // progress with the last index already. + r.prs.Progress[r.id].BecomeReplicate() + + // Conservatively set the pendingConfIndex to the last index in the + // log. There may or may not be a pending config change, but it's + // safe to delay any future proposals until we commit all our + // pending log entries, and scanning the entire tail of the log + // could be expensive. + r.pendingConfIndex = r.raftLog.lastIndex() + + emptyEnt := pb.Entry{Data: nil} + if !r.appendEntry(emptyEnt) { + // This won't happen because we just called reset() above. + r.logger.Panic("empty entry was dropped") + } + // As a special case, don't count the initial empty entry towards the + // uncommitted log quota. This is because we want to preserve the + // behavior of allowing one entry larger than quota if the current + // usage is zero. + r.reduceUncommittedSize([]pb.Entry{emptyEnt}) + r.logger.Infof("%x became leader at term %d", r.id, r.Term) +} + +// campaign transitions the raft instance to candidate state. This must only be +// called after verifying that this is a legitimate transition. +func (r *raft) campaign(t CampaignType) { + if !r.promotable() { + // This path should not be hit (callers are supposed to check), but + // better safe than sorry. + r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id) + } + var term uint64 + var voteMsg pb.MessageType + if t == campaignPreElection { + r.becomePreCandidate() + voteMsg = pb.MsgPreVote + // PreVote RPCs are sent for the next term before we've incremented r.Term. 
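+		// r.Term itself is only incremented later, in becomeCandidate, once
+		// the pre-vote has been won.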
+ term = r.Term + 1 + } else { + r.becomeCandidate() + voteMsg = pb.MsgVote + term = r.Term + } + if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon { + // We won the election after voting for ourselves (which must mean that + // this is a single-node cluster). Advance to the next state. + if t == campaignPreElection { + r.campaign(campaignElection) + } else { + r.becomeLeader() + } + return + } + var ids []uint64 + { + idMap := r.prs.Voters.IDs() + ids = make([]uint64, 0, len(idMap)) + for id := range idMap { + ids = append(ids, id) + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + } + for _, id := range ids { + if id == r.id { + continue + } + r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) + + var ctx []byte + if t == campaignTransfer { + ctx = []byte(t) + } + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + } +} + +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) { + if v { + r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) + } else { + r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) + } + r.prs.RecordVote(id, v) + return r.prs.TallyVotes() +} + +func (r *raft) Step(m pb.Message) error { + // Handle the message term, which may result in our stepping down to a follower. + switch { + case m.Term == 0: + // local message + case m.Term > r.Term: + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + force := bytes.Equal(m.Context, []byte(campaignTransfer)) + inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + if !force && inLease { + // If a server receives a RequestVote request within the minimum election timeout + // of hearing from a current leader, it does not update its term or grant its vote + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) + return nil + } + } + switch { + case m.Type == pb.MsgPreVote: + // Never change our term in response to a PreVote + case m.Type == pb.MsgPreVoteResp && !m.Reject: + // We send pre-vote requests with a term in our future. If the + // pre-vote is granted, we will increment our term when we get a + // quorum. If it is not, the term comes from the node that + // rejected our vote so we should become a follower at the new + // term. + default: + r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap { + r.becomeFollower(m.Term, m.From) + } else { + r.becomeFollower(m.Term, None) + } + } + + case m.Term < r.Term: + if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + // We have received messages from a leader at a lower term. It is possible + // that these messages were simply delayed in the network, but this could + // also mean that this node has advanced its term number during a network + // partition, and it is now unable to either win an election or to rejoin + // the majority on the old term. 
If checkQuorum is false, this will be + // handled by incrementing term numbers in response to MsgVote with a + // higher term, but if checkQuorum is true we may not advance the term on + // MsgVote and must generate other messages to advance the term. The net + // result of these two features is to minimize the disruption caused by + // nodes that have been removed from the cluster's configuration: a + // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, + // but it will not receive MsgApp or MsgHeartbeat, so it will not create + // disruptive term increases, by notifying leader of this node's activeness. + // The above comments also true for Pre-Vote + // + // When follower gets isolated, it soon starts an election ending + // up with a higher term than leader, although it won't receive enough + // votes to win the election. When it regains connectivity, this response + // with "pb.MsgAppResp" of higher term would force leader to step down. + // However, this disruption is inevitable to free this stuck node with + // fresh election. This can be prevented with Pre-Vote phase. + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else if m.Type == pb.MsgPreVote { + // Before Pre-Vote enable, there may have candidate with higher term, + // but less log. After update to Pre-Vote, the cluster may deadlock if + // we drop messages with a lower term. + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true}) + } else { + // ignore other cases + r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + } + return nil + } + + switch m.Type { + case pb.MsgHup: + if r.state != StateLeader { + if !r.promotable() { + r.logger.Warningf("%x is unpromotable and can not campaign; ignoring MsgHup", r.id) + return nil + } + ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) + } + if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { + r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) + return nil + } + + r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) + if r.preVote { + r.campaign(campaignPreElection) + } else { + r.campaign(campaignElection) + } + } else { + r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) + } + + case pb.MsgVote, pb.MsgPreVote: + // We can vote if this is a repeat of a vote we've already cast... + canVote := r.Vote == m.From || + // ...we haven't voted and we don't think there's a leader yet in this term... + (r.Vote == None && r.lead == None) || + // ...or this is a PreVote for a future term... + (m.Type == pb.MsgPreVote && m.Term > r.Term) + // ...and we believe the candidate is up to date. + if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // Note: it turns out that that learners must be allowed to cast votes. + // This seems counter- intuitive but is necessary in the situation in which + // a learner has been promoted (i.e. is now a voter) but has not learned + // about this yet. 
+ // For example, consider a group in which id=1 is a learner and id=2 and + // id=3 are voters. A configuration change promoting 1 can be committed on + // the quorum `{2,3}` without the config change being appended to the + // learner's log. If the leader (say 2) fails, there are de facto two + // voters remaining. Only 3 can win an election (due to its log containing + // all committed entries), but to do so it will need 1 to vote. But 1 + // considers itself a learner and will continue to do so until 3 has + // stepped up as leader, replicates the conf change to 1, and 1 applies it. + // Ultimately, by receiving a request to vote, the learner realizes that + // the candidate believes it to be a voter, and that it should act + // accordingly. The candidate's config may be stale, too; but in that case + // it won't win the election, at least in the absence of the bug discussed + // in: + // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263. + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + // When responding to Msg{Pre,}Vote messages we include the term + // from the message, not the local term. To see why, consider the + // case where a single node was previously partitioned away and + // it's local term is now out of date. If we include the local term + // (recall that for pre-votes we don't update the local term), the + // (pre-)campaigning node on the other end will proceed to ignore + // the message (it ignores all out of date messages). + // The term in the original message and current local term are the + // same in the case of regular votes, but different for pre-votes. + r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)}) + if m.Type == pb.MsgVote { + // Only record real votes. + r.electionElapsed = 0 + r.Vote = m.From + } + } else { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true}) + } + + default: + err := r.step(r, m) + if err != nil { + return err + } + } + return nil +} + +type stepFunc func(r *raft, m pb.Message) error + +func stepLeader(r *raft, m pb.Message) error { + // These message types do not require any progress for m.From. + switch m.Type { + case pb.MsgBeat: + r.bcastHeartbeat() + return nil + case pb.MsgCheckQuorum: + // The leader should always see itself as active. As a precaution, handle + // the case in which the leader isn't in the configuration any more (for + // example if it just removed itself). + // + // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the + // leader steps down when removing itself. I might be missing something. + if pr := r.prs.Progress[r.id]; pr != nil { + pr.RecentActive = true + } + if !r.prs.QuorumActive() { + r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) + r.becomeFollower(r.Term, None) + } + // Mark everyone (but ourselves) as inactive in preparation for the next + // CheckQuorum. 
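+		// RecentActive is set back to true as followers respond to appends
+		// and heartbeats, so only peers that stay silent until the next
+		// MsgCheckQuorum count against the quorum check above.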
+ r.prs.Visit(func(id uint64, pr *tracker.Progress) { + if id != r.id { + pr.RecentActive = false + } + }) + return nil + case pb.MsgProp: + if len(m.Entries) == 0 { + r.logger.Panicf("%x stepped empty MsgProp", r.id) + } + if r.prs.Progress[r.id] == nil { + // If we are not currently a member of the range (i.e. this node + // was removed from the configuration while serving as leader), + // drop any new proposals. + return ErrProposalDropped + } + if r.leadTransferee != None { + r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) + return ErrProposalDropped + } + + for i := range m.Entries { + e := &m.Entries[i] + var cc pb.ConfChangeI + if e.Type == pb.EntryConfChange { + var ccc pb.ConfChange + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } else if e.Type == pb.EntryConfChangeV2 { + var ccc pb.ConfChangeV2 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } + if cc != nil { + alreadyPending := r.pendingConfIndex > r.raftLog.applied + alreadyJoint := len(r.prs.Config.Voters[1]) > 0 + wantsLeaveJoint := len(cc.AsV2().Changes) == 0 + + var refused string + if alreadyPending { + refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) + } else if alreadyJoint && !wantsLeaveJoint { + refused = "must transition out of joint config first" + } else if !alreadyJoint && wantsLeaveJoint { + refused = "not in joint state; refusing empty conf change" + } + + if refused != "" { + r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused) + m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } else { + r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 + } + } + } + + if !r.appendEntry(m.Entries...) { + return ErrProposalDropped + } + r.bcastAppend() + return nil + case pb.MsgReadIndex: + // If more than the local vote is needed, go through a full broadcast, + // otherwise optimize. + if !r.prs.IsSingleton() { + if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term { + // Reject read only request when this leader has not committed any log entry at its term. + return nil + } + + // thinking: use an interally defined context instead of the user given context. + // We can express this in terms of the term and index instead of a user-supplied value. + // This would allow multiple reads to piggyback on the same message. + switch r.readOnly.option { + case ReadOnlySafe: + r.readOnly.addRequest(r.raftLog.committed, m) + // The local node automatically acks the request. + r.readOnly.recvAck(r.id, m.Entries[0].Data) + r.bcastHeartbeatWithCtx(m.Entries[0].Data) + case ReadOnlyLeaseBased: + ri := r.raftLog.committed + if m.From == None || m.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: ri, RequestCtx: m.Entries[0].Data}) + } else { + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) + } + } + } else { // only one voting member (the leader) in the cluster + if m.From == None || m.From == r.id { // from leader itself + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // from learner member + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries}) + } + } + + return nil + } + + // All other message types require a progress for m.From (pr). 
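+	// Messages from peers that are not (or no longer) part of the tracked
+	// configuration are dropped just below with a debug log.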
+ pr := r.prs.Progress[m.From] + if pr == nil { + r.logger.Debugf("%x no progress available for %x", r.id, m.From) + return nil + } + switch m.Type { + case pb.MsgAppResp: + pr.RecentActive = true + + if m.Reject { + r.logger.Debugf("%x received MsgAppResp(MsgApp was rejected, lastindex: %d) from %x for index %d", + r.id, m.RejectHint, m.From, m.Index) + if pr.MaybeDecrTo(m.Index, m.RejectHint) { + r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() + } + r.sendAppend(m.From) + } + } else { + oldPaused := pr.IsPaused() + if pr.MaybeUpdate(m.Index) { + switch { + case pr.State == tracker.StateProbe: + pr.BecomeReplicate() + case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot: + // TODO(tbg): we should also enter this branch if a snapshot is + // received that is below pr.PendingSnapshot but which makes it + // possible to use the log again. + r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + // Transition back to replicating state via probing state + // (which takes the snapshot into account). If we didn't + // move to replicating state, that would only happen with + // the next round of appends (but there may not be a next + // round for a while, exposing an inconsistent RaftStatus). + pr.BecomeProbe() + pr.BecomeReplicate() + case pr.State == tracker.StateReplicate: + pr.Inflights.FreeLE(m.Index) + } + + if r.maybeCommit() { + r.bcastAppend() + } else if oldPaused { + // If we were paused before, this node may be missing the + // latest commit index, so send it. + r.sendAppend(m.From) + } + // We've updated flow control information above, which may + // allow us to send multiple (size-limited) in-flight messages + // at once (such as when transitioning from probe to + // replicate, or when freeTo() covers multiple messages). If + // we have more entries to send, send as many messages as we + // can (without sending empty messages for the commit index) + for r.maybeSendAppend(m.From, false) { + } + // Transfer leadership is in progress. + if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { + r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) + r.sendTimeoutNow(m.From) + } + } + } + case pb.MsgHeartbeatResp: + pr.RecentActive = true + pr.ProbeSent = false + + // free one slot for the full inflights window to allow progress. + if pr.State == tracker.StateReplicate && pr.Inflights.Full() { + pr.Inflights.FreeFirstOne() + } + if pr.Match < r.raftLog.lastIndex() { + r.sendAppend(m.From) + } + + if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { + return nil + } + + if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { + return nil + } + + rss := r.readOnly.advance(m) + for _, rs := range rss { + req := rs.req + if req.From == None || req.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data}) + } else { + r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries}) + } + } + case pb.MsgSnapStatus: + if pr.State != tracker.StateSnapshot { + return nil + } + // TODO(tbg): this code is very similar to the snapshot handling in + // MsgAppResp above. 
In fact, the code there is more correct than the + // code here and should likely be updated to match (or even better, the + // logic pulled into a newly created Progress state machine handler). + if !m.Reject { + pr.BecomeProbe() + r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } else { + // NB: the order here matters or we'll be probing erroneously from + // the snapshot index, but the snapshot never applied. + pr.PendingSnapshot = 0 + pr.BecomeProbe() + r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } + // If snapshot finish, wait for the MsgAppResp from the remote node before sending + // out the next MsgApp. + // If snapshot failure, wait for a heartbeat interval before next try + pr.ProbeSent = true + case pb.MsgUnreachable: + // During optimistic replication, if the remote becomes unreachable, + // there is huge probability that a MsgApp is lost. + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() + } + r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) + case pb.MsgTransferLeader: + if pr.IsLearner { + r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id) + return nil + } + leadTransferee := m.From + lastLeadTransferee := r.leadTransferee + if lastLeadTransferee != None { + if lastLeadTransferee == leadTransferee { + r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", + r.id, r.Term, leadTransferee, leadTransferee) + return nil + } + r.abortLeaderTransfer() + r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) + } + if leadTransferee == r.id { + r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) + return nil + } + // Transfer leadership to third party. + r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) + // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed. + r.electionElapsed = 0 + r.leadTransferee = leadTransferee + if pr.Match == r.raftLog.lastIndex() { + r.sendTimeoutNow(leadTransferee) + r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee) + } else { + r.sendAppend(leadTransferee) + } + } + return nil +} + +// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is +// whether they respond to MsgVoteResp or MsgPreVoteResp. +func stepCandidate(r *raft, m pb.Message) error { + // Only handle vote responses corresponding to our candidacy (while in + // StateCandidate, we may get stale MsgPreVoteResp messages in this term from + // our pre-candidate state). 
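// Illustrative sketch, not part of the vendored file: the selection below can
// be read as a pure mapping from the node's current state to the vote-response
// type it is willing to count. expectedVoteResp is an invented name, assuming
// the package's StateType for r.state.
func expectedVoteResp(state StateType) pb.MessageType {
	if state == StatePreCandidate {
		// Pre-candidates campaign with MsgPreVote, so only MsgPreVoteResp is counted.
		return pb.MsgPreVoteResp
	}
	// Candidates campaign with MsgVote and count MsgVoteResp.
	return pb.MsgVoteResp
}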
+ var myVoteRespType pb.MessageType + if r.state == StatePreCandidate { + myVoteRespType = pb.MsgPreVoteResp + } else { + myVoteRespType = pb.MsgVoteResp + } + switch m.Type { + case pb.MsgProp: + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return ErrProposalDropped + case pb.MsgApp: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleHeartbeat(m) + case pb.MsgSnap: + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term + r.handleSnapshot(m) + case myVoteRespType: + gr, rj, res := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj) + switch res { + case quorum.VoteWon: + if r.state == StatePreCandidate { + r.campaign(campaignElection) + } else { + r.becomeLeader() + r.bcastAppend() + } + case quorum.VoteLost: + // pb.MsgPreVoteResp contains future term of pre-candidate + // m.Term > r.Term; reuse r.Term + r.becomeFollower(r.Term, None) + } + case pb.MsgTimeoutNow: + r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) + } + return nil +} + +func stepFollower(r *raft, m pb.Message) error { + switch m.Type { + case pb.MsgProp: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return ErrProposalDropped + } else if r.disableProposalForwarding { + r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term) + return ErrProposalDropped + } + m.To = r.lead + r.send(m) + case pb.MsgApp: + r.electionElapsed = 0 + r.lead = m.From + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.electionElapsed = 0 + r.lead = m.From + r.handleHeartbeat(m) + case pb.MsgSnap: + r.electionElapsed = 0 + r.lead = m.From + r.handleSnapshot(m) + case pb.MsgTransferLeader: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgTimeoutNow: + if r.promotable() { + r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) + // Leadership transfers never use pre-vote even if r.preVote is true; we + // know we are not recovering from a partition so there is no need for the + // extra round trip. 
+ r.campaign(campaignTransfer) + } else { + r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From) + } + case pb.MsgReadIndex: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgReadIndexResp: + if len(m.Entries) != 1 { + r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) + return nil + } + r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) + } + return nil +} + +func (r *raft) handleAppendEntries(m pb.Message) { + if m.Index < r.raftLog.committed { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + return + } + + if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) + } else { + r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", + r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) + } +} + +func (r *raft) handleHeartbeat(m pb.Message) { + r.raftLog.commitTo(m.Commit) + r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context}) +} + +func (r *raft) handleSnapshot(m pb.Message) { + sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term + if r.restore(m.Snapshot) { + r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) + } else { + r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + } +} + +// restore recovers the state machine from a snapshot. It restores the log and the +// configuration of state machine. If this method returns false, the snapshot was +// ignored, either because it was obsolete or because of an error. +func (r *raft) restore(s pb.Snapshot) bool { + if s.Metadata.Index <= r.raftLog.committed { + return false + } + if r.state != StateFollower { + // This is defense-in-depth: if the leader somehow ended up applying a + // snapshot, it could move into a new term without moving into a + // follower state. This should never fire, but if it did, we'd have + // prevented damage by returning early, so log only a loud warning. + // + // At the time of writing, the instance is guaranteed to be in follower + // state when this method is called. + r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id) + r.becomeFollower(r.Term+1, None) + return false + } + + // More defense-in-depth: throw away snapshot if recipient is not in the + // config. This shouuldn't ever happen (at the time of writing) but lots of + // code here and there assumes that r.id is in the progress tracker. 
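// Illustrative sketch, not part of the vendored file: the loop that follows
// scans the snapshot's ConfState for this node's id across both the voter and
// learner sets. Written as a hypothetical standalone helper:
func confStateContains(cs pb.ConfState, id uint64) bool {
	for _, set := range [][]uint64{cs.Voters, cs.Learners} {
		for _, member := range set {
			if member == id {
				return true
			}
		}
	}
	return false
}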
+ found := false + cs := s.Metadata.ConfState + for _, set := range [][]uint64{ + cs.Voters, + cs.Learners, + } { + for _, id := range set { + if id == r.id { + found = true + break + } + } + } + if !found { + r.logger.Warningf( + "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen", + r.id, cs, + ) + return false + } + + // Now go ahead and actually restore. + + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } + + r.raftLog.restore(s) + + // Reset the configuration and add the (potentially updated) peers in anew. + r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight) + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + }, cs) + + if err != nil { + // This should never happen. Either there's a bug in our config change + // handling or the client corrupted the conf change. + panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err)) + } + + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + pr := r.prs.Progress[r.id] + pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + return true +} + +// promotable indicates whether state machine can be promoted to leader, +// which is true when its own id is in progress list. +func (r *raft) promotable() bool { + pr := r.prs.Progress[r.id] + return pr != nil && !pr.IsLearner +} + +func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { + cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) { + changer := confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + } + if cc.LeaveJoint() { + return changer.LeaveJoint() + } else if autoLeave, ok := cc.EnterJoint(); ok { + return changer.EnterJoint(autoLeave, cc.Changes...) + } + return changer.Simple(cc.Changes...) + }() + + if err != nil { + // TODO(tbg): return the error to the caller. + panic(err) + } + + return r.switchToConfig(cfg, prs) +} + +// switchToConfig reconfigures this node to use the provided configuration. It +// updates the in-memory state and, when necessary, carries out additional +// actions such as reacting to the removal of nodes or changed quorum +// requirements. +// +// The inputs usually result from restoring a ConfState or applying a ConfChange. +func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { + r.prs.Config = cfg + r.prs.Progress = prs + + r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config) + cs := r.prs.ConfState() + pr, ok := r.prs.Progress[r.id] + + // Update whether the node itself is a learner, resetting to false when the + // node is removed. + r.isLearner = ok && pr.IsLearner + + if (!ok || r.isLearner) && r.state == StateLeader { + // This node is leader and was removed or demoted. We prevent demotions + // at the time writing but hypothetically we handle them the same way as + // removing the leader: stepping down into the next Term. 
+ // + // TODO(tbg): step down (for sanity) and ask follower with largest Match + // to TimeoutNow (to avoid interruption). This might still drop some + // proposals but it's better than nothing. + // + // TODO(tbg): test this branch. It is untested at the time of writing. + return cs + } + + // The remaining steps only make sense if this node is the leader and there + // are other nodes. + if r.state != StateLeader || len(cs.Voters) == 0 { + return cs + } + + if r.maybeCommit() { + // If the configuration change means that more entries are committed now, + // broadcast/append to everyone in the updated config. + r.bcastAppend() + } else { + // Otherwise, still probe the newly added replicas; there's no reason to + // let them wait out a heartbeat interval (or the next incoming + // proposal). + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.maybeSendAppend(id, false /* sendIfEmpty */) + }) + } + // If the the leadTransferee was removed, abort the leadership transfer. + if _, tOK := r.prs.Progress[r.leadTransferee]; !tOK && r.leadTransferee != 0 { + r.abortLeaderTransfer() + } + + return cs +} + +func (r *raft) loadState(state pb.HardState) { + if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { + r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) + } + r.raftLog.committed = state.Commit + r.Term = state.Term + r.Vote = state.Vote +} + +// pastElectionTimeout returns true iff r.electionElapsed is greater +// than or equal to the randomized election timeout in +// [electiontimeout, 2 * electiontimeout - 1]. +func (r *raft) pastElectionTimeout() bool { + return r.electionElapsed >= r.randomizedElectionTimeout +} + +func (r *raft) resetRandomizedElectionTimeout() { + r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) +} + +func (r *raft) sendTimeoutNow(to uint64) { + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) +} + +func (r *raft) abortLeaderTransfer() { + r.leadTransferee = None +} + +// increaseUncommittedSize computes the size of the proposed entries and +// determines whether they would push leader over its maxUncommittedSize limit. +// If the new entries would exceed the limit, the method returns false. If not, +// the increase in uncommitted entry size is recorded and the method returns +// true. +func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + + if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + // If the uncommitted tail of the Raft log is empty, allow any size + // proposal. Otherwise, limit the size of the uncommitted tail of the + // log and drop any proposal that would push the size over the limit. + return false + } + r.uncommittedSize += s + return true +} + +// reduceUncommittedSize accounts for the newly committed entries by decreasing +// the uncommitted entry size limit. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + if r.uncommittedSize == 0 { + // Fast-path for followers, who do not track or enforce the limit. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + if s > r.uncommittedSize { + // uncommittedSize may underestimate the size of the uncommitted Raft + // log tail but will never overestimate it. Saturate at 0 instead of + // allowing overflow. 
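// Illustrative sketch, not part of the vendored file: the branch below is a
// saturating subtraction on uint64, i.e. subtracting more than the tracked
// value clamps to zero instead of wrapping around. As a hypothetical helper:
func saturatingSub(a, b uint64) uint64 {
	if b > a {
		// Would underflow; clamp to zero rather than wrapping to a huge value.
		return 0
	}
	return a - b
}
// Here a corresponds to r.uncommittedSize and b to the payload size of the
// newly committed entries.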
+ r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + +func numOfPendingConf(ents []pb.Entry) int { + n := 0 + for i := range ents { + if ents[i].Type == pb.EntryConfChange { + n++ + } + } + return n +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go new file mode 100644 index 000000000..46a7a7021 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go @@ -0,0 +1,170 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow +// treating them in a unified manner. +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChange, bool) +} + +// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2 +// and returns the result along with the corresponding EntryType. +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +// AsV2 returns a V2 configuration change carrying out the same operation. +func (c ConfChange) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +// AsV1 returns the ConfChange and true. +func (c ConfChange) AsV1() (ConfChange, bool) { + return c, true +} + +// AsV2 is the identity. +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +// AsV1 returns ConfChange{} and false. +func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false } + +// EnterJoint returns two bools. The second bool is true if and only if this +// config change will use Joint Consensus, which is the case if it contains more +// than one change or if the use of Joint Consensus was requested explicitly. +// The first bool can only be true if second one is, and indicates whether the +// Joint State will be left automatically. +func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // NB: in theory, more config changes could qualify for the "simple" + // protocol but it depends on the config on top of which the changes apply. + // For example, adding two learners is not OK if both nodes are part of the + // base config (i.e. two voters are turned into learners in the process of + // applying the conf change). In practice, these distinctions should not + // matter, so we keep it simple and use Joint Consensus liberally. + if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // Use Joint Consensus. 
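// Illustrative sketch, not part of the vendored file: per the comment above,
// joint consensus is used when a ConfChangeV2 carries more than one change or
// requests it explicitly. Example values (node ids are arbitrary):

// Single change with the default (auto) transition: EnterJoint returns
// (false, false) and the simple protocol is used.
_ = ConfChangeV2{Changes: []ConfChangeSingle{
	{Type: ConfChangeAddNode, NodeID: 4},
}}

// Two changes: EnterJoint returns (true, true), i.e. joint consensus with an
// automatic transition out of the joint configuration.
_ = ConfChangeV2{Changes: []ConfChangeSingle{
	{Type: ConfChangeAddNode, NodeID: 4},
	{Type: ConfChangeRemoveNode, NodeID: 2},
}}

// The zero ConfChangeV2 (no changes): LeaveJoint reports true, and proposing
// it is how a joint configuration is left.
_ = ConfChangeV2{}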
+ var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("unknown transition: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +func (c *ConfChangeV2) LeaveJoint() bool { + cpy := *c + cpy.Context = nil + return proto.Equal(&cpy, &ConfChangeV2{}) +} + +// ConfChangesFromString parses a Space-delimited sequence of operations into a +// slice of ConfChangeSingle. The supported operations are: +// - vn: make n a voter, +// - ln: make n a learner, +// - rn: remove n, and +// - un: update n. +func ConfChangesFromString(s string) ([]ConfChangeSingle, error) { + var ccs []ConfChangeSingle + toks := strings.Split(strings.TrimSpace(s), " ") + if toks[0] == "" { + toks = nil + } + for _, tok := range toks { + if len(tok) < 2 { + return nil, fmt.Errorf("unknown token %s", tok) + } + var cc ConfChangeSingle + switch tok[0] { + case 'v': + cc.Type = ConfChangeAddNode + case 'l': + cc.Type = ConfChangeAddLearnerNode + case 'r': + cc.Type = ConfChangeRemoveNode + case 'u': + cc.Type = ConfChangeUpdateNode + default: + return nil, fmt.Errorf("unknown input: %s", tok) + } + id, err := strconv.ParseUint(tok[1:], 10, 64) + if err != nil { + return nil, err + } + cc.NodeID = id + ccs = append(ccs, cc) + } + return ccs, nil +} + +// ConfChangesToString is the inverse to ConfChangesFromString. +func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteByte('v') + case ConfChangeAddLearnerNode: + buf.WriteByte('l') + case ConfChangeRemoveNode: + buf.WriteByte('r') + case ConfChangeUpdateNode: + buf.WriteByte('u') + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go new file mode 100644 index 000000000..4bda93214 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go @@ -0,0 +1,45 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent returns a nil error if the inputs describe the same configuration. +// On mismatch, returns a descriptive error showing the differences. +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
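// Illustrative note, not part of the vendored file: ConfChangesFromString (in
// confchange.go above) parses the space-delimited mini-language described in
// its comment, and ConfChangesToString is its inverse. For example:
//
//	ccs, err := ConfChangesFromString("v1 l2 r3 u4")
//	// err == nil; ccs holds, in order:
//	//   {Type: ConfChangeAddNode,        NodeID: 1}
//	//   {Type: ConfChangeAddLearnerNode, NodeID: 2}
//	//   {Type: ConfChangeRemoveNode,     NodeID: 3}
//	//   {Type: ConfChangeUpdateNode,     NodeID: 4}
//	ConfChangesToString(ccs) // returns "v1 l2 r3 u4"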
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + cs.XXX_unrecognized = nil + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 000000000..fcf259c89 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,2646 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft.proto + +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange + ConfChangeSingle + ConfChangeV2 +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", + 2: "EntryConfChangeV2", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = 
map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +type ConfChangeTransition int32 + +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). 
+ ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} + +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p +} +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return 
proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. 
+ AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` + Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. 
Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Voters) > 0 { + for _, num := range m.Voters { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, num := range m.VotersOutgoing { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.LearnersNext) > 0 { + for _, num := range m.LearnersNext { + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + dAtA[i] = 
0x28 + i++ + if m.AutoLeave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Context != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + 
sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Voters) > 0 { + for _, e := range m.Voters { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, e := range m.VotersOutgoing { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.LearnersNext) > 0 { + for _, e := range m.LearnersNext { + n += 1 + sovRaft(uint64(e)) + } + } + n += 2 + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeSingle) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeV2) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLeave = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType) + } + m.Transition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Transition |= (ConfChangeTransition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, ConfChangeSingle{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xe3, 0x36, + 0x17, 0xb5, 0x64, 0xc5, 0x3f, 0xd7, 0x8e, 0xc3, 0xdc, 0xc9, 0x37, 0x20, 0x82, 0xc0, 0xe3, 0xcf, + 0xd3, 0x62, 0x8c, 0x14, 0x93, 0x16, 0x5e, 0x14, 0x45, 0x77, 0xf9, 0x19, 0x20, 0x29, 0xe2, 0x74, + 0xea, 0x64, 0xb2, 0x28, 0x50, 0x04, 0x8c, 0x45, 0x2b, 0x6a, 0x2d, 0x51, 0xa0, 0xe8, 0x34, 0xd9, + 0x14, 0x45, 0x9f, 0xa2, 0x9b, 0xd9, 0xf6, 0x01, 0xfa, 0x14, 0x59, 0x0e, 0xd0, 0xfd, 0xa0, 0x93, + 0xbe, 0x48, 0x41, 0x8a, 0xb2, 0x65, 0x27, 0x98, 0x45, 0x77, 0xe4, 0x39, 0x87, 0xf7, 0x9e, 0x7b, + 0x79, 0x45, 0x01, 0x48, 0x36, 0x56, 0x3b, 0x89, 0x14, 0x4a, 0x60, 0x45, 0xaf, 0x93, 0xcb, 0xcd, + 0x8d, 0x40, 0x04, 0xc2, 0x40, 0x9f, 0xeb, 0x55, 0xc6, 0x76, 0x7f, 0x81, 0x95, 0x57, 0xb1, 0x92, + 0xb7, 0xf8, 0x19, 0x78, 0x67, 0xb7, 0x09, 0xa7, 0x4e, 0xc7, 0xe9, 0xb5, 0xfa, 0xeb, 0x3b, 0xd9, + 0xa9, 0x1d, 0x43, 0x6a, 0x62, 0xcf, 0xbb, 0x7b, 0xff, 0xac, 0x34, 0x34, 0x22, 0xa4, 0xe0, 0x9d, + 0x71, 0x19, 0x51, 0xb7, 0xe3, 0xf4, 0xbc, 0x19, 0xc3, 0x65, 0x84, 0x9b, 0xb0, 0x72, 0x14, 0xfb, + 0xfc, 0x86, 0x96, 0x0b, 0x54, 0x06, 0x21, 0x82, 0x77, 0xc0, 0x14, 0xa3, 0x5e, 0xc7, 0xe9, 0x35, + 0x87, 0x66, 0xdd, 0xfd, 0xd5, 0x01, 0x72, 0x1a, 0xb3, 0x24, 0xbd, 0x12, 0x6a, 0xc0, 0x15, 0xf3, + 0x99, 0x62, 0xf8, 0x25, 0xc0, 0x48, 0xc4, 0xe3, 0x8b, 0x54, 0x31, 0x95, 0x39, 0x6a, 0xcc, 0x1d, + 0xed, 0x8b, 0x78, 
0x7c, 0xaa, 0x09, 0x1b, 0xbc, 0x3e, 0xca, 0x01, 0x9d, 0x3c, 0x34, 0xc9, 0x8b, + 0xbe, 0x32, 0x48, 0x5b, 0x56, 0xda, 0x72, 0xd1, 0x97, 0x41, 0xba, 0xdf, 0x43, 0x2d, 0x77, 0xa0, + 0x2d, 0x6a, 0x07, 0x26, 0x67, 0x73, 0x68, 0xd6, 0xf8, 0x35, 0xd4, 0x22, 0xeb, 0xcc, 0x04, 0x6e, + 0xf4, 0x69, 0xee, 0x65, 0xd9, 0xb9, 0x8d, 0x3b, 0xd3, 0x77, 0xdf, 0x96, 0xa1, 0x3a, 0xe0, 0x69, + 0xca, 0x02, 0x8e, 0x2f, 0xc1, 0x53, 0xf3, 0x0e, 0x3f, 0xc9, 0x63, 0x58, 0xba, 0xd8, 0x63, 0x2d, + 0xc3, 0x0d, 0x70, 0x95, 0x58, 0xa8, 0xc4, 0x55, 0x42, 0x97, 0x31, 0x96, 0x62, 0xa9, 0x0c, 0x8d, + 0xcc, 0x0a, 0xf4, 0x96, 0x0b, 0xc4, 0x36, 0x54, 0x27, 0x22, 0x30, 0x17, 0xb6, 0x52, 0x20, 0x73, + 0x70, 0xde, 0xb6, 0xca, 0xc3, 0xb6, 0xbd, 0x84, 0x2a, 0x8f, 0x95, 0x0c, 0x79, 0x4a, 0xab, 0x9d, + 0x72, 0xaf, 0xd1, 0x5f, 0x5d, 0x98, 0x8c, 0x3c, 0x94, 0xd5, 0xe0, 0x16, 0x54, 0x46, 0x22, 0x8a, + 0x42, 0x45, 0x6b, 0x85, 0x58, 0x16, 0xc3, 0x3e, 0xd4, 0x52, 0xdb, 0x31, 0x5a, 0x37, 0x9d, 0x24, + 0xcb, 0x9d, 0xcc, 0x3b, 0x98, 0xeb, 0x74, 0x44, 0xc9, 0x7f, 0xe4, 0x23, 0x45, 0xa1, 0xe3, 0xf4, + 0x6a, 0x79, 0xc4, 0x0c, 0xc3, 0x4f, 0x00, 0xb2, 0xd5, 0x61, 0x18, 0x2b, 0xda, 0x28, 0xe4, 0x2c, + 0xe0, 0x48, 0xa1, 0x3a, 0x12, 0xb1, 0xe2, 0x37, 0x8a, 0x36, 0xcd, 0xc5, 0xe6, 0xdb, 0xee, 0x0f, + 0x50, 0x3f, 0x64, 0xd2, 0xcf, 0xc6, 0x27, 0xef, 0xa0, 0xf3, 0xa0, 0x83, 0x14, 0xbc, 0x6b, 0xa1, + 0xf8, 0xe2, 0xbc, 0x6b, 0xa4, 0x50, 0x70, 0xf9, 0x61, 0xc1, 0xdd, 0x3f, 0x1d, 0xa8, 0xcf, 0xe6, + 0x15, 0x9f, 0x42, 0x45, 0x9f, 0x91, 0x29, 0x75, 0x3a, 0xe5, 0x9e, 0x37, 0xb4, 0x3b, 0xdc, 0x84, + 0xda, 0x84, 0x33, 0x19, 0x6b, 0xc6, 0x35, 0xcc, 0x6c, 0x8f, 0x2f, 0x60, 0x2d, 0x53, 0x5d, 0x88, + 0xa9, 0x0a, 0x44, 0x18, 0x07, 0xb4, 0x6c, 0x24, 0xad, 0x0c, 0xfe, 0xd6, 0xa2, 0xf8, 0x1c, 0x56, + 0xf3, 0x43, 0x17, 0xb1, 0xae, 0xd4, 0x33, 0xb2, 0x66, 0x0e, 0x9e, 0xf0, 0x1b, 0x85, 0xcf, 0x01, + 0xd8, 0x54, 0x89, 0x8b, 0x09, 0x67, 0xd7, 0xdc, 0x0c, 0x43, 0xde, 0xd0, 0xba, 0xc6, 0x8f, 0x35, + 0xdc, 0x7d, 0xeb, 0x00, 0x68, 0xd3, 0xfb, 0x57, 0x2c, 0x0e, 0xf4, 0x47, 0xe5, 0x86, 0xbe, 0xed, + 0x09, 0x68, 0xed, 0xfd, 0xfb, 0x67, 0xee, 0xd1, 0xc1, 0xd0, 0x0d, 0x7d, 0xfc, 0xc2, 0x8e, 0xb4, + 0x6b, 0x46, 0xfa, 0x69, 0xf1, 0x13, 0xcd, 0x4e, 0x3f, 0x98, 0xea, 0x17, 0x50, 0x8d, 0x85, 0xcf, + 0x2f, 0x42, 0xdf, 0x36, 0xac, 0x65, 0x43, 0x56, 0x4e, 0x84, 0xcf, 0x8f, 0x0e, 0x86, 0x15, 0x4d, + 0x1f, 0xf9, 0xc5, 0x3b, 0xf3, 0x16, 0xef, 0x2c, 0x02, 0x32, 0x4f, 0x70, 0x1a, 0xc6, 0xc1, 0x84, + 0xcf, 0x8c, 0x38, 0xff, 0xc5, 0x88, 0xfb, 0x31, 0x23, 0xdd, 0x3f, 0x1c, 0x68, 0xce, 0xe3, 0x9c, + 0xf7, 0x71, 0x0f, 0x40, 0x49, 0x16, 0xa7, 0xa1, 0x0a, 0x45, 0x6c, 0x33, 0x6e, 0x3d, 0x92, 0x71, + 0xa6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x82, 0xea, 0xc8, 0xa8, 0xb2, 0x1b, 0x2f, 0x3c, 0x29, + 0xcb, 0xa5, 0xe5, 0x5f, 0x98, 0x95, 0x17, 0xfb, 0x52, 0x5e, 0xe8, 0xcb, 0xf6, 0x21, 0xd4, 0x67, + 0xaf, 0x35, 0xae, 0x41, 0xc3, 0x6c, 0x4e, 0x84, 0x8c, 0xd8, 0x84, 0x94, 0xf0, 0x09, 0xac, 0x19, + 0x60, 0x1e, 0x9f, 0x38, 0xf8, 0x3f, 0x58, 0x5f, 0x02, 0xcf, 0xfb, 0xc4, 0xdd, 0xfe, 0xcb, 0x85, + 0x46, 0xe1, 0x59, 0x42, 0x80, 0xca, 0x20, 0x0d, 0x0e, 0xa7, 0x09, 0x29, 0x61, 0x03, 0xaa, 0x83, + 0x34, 0xd8, 0xe3, 0x4c, 0x11, 0xc7, 0x6e, 0x5e, 0x4b, 0x91, 0x10, 0xd7, 0xaa, 0x76, 0x93, 0x84, + 0x94, 0xb1, 0x05, 0x90, 0xad, 0x87, 0x3c, 0x4d, 0x88, 0x67, 0x85, 0xe7, 0x42, 0x71, 0xb2, 0xa2, + 0xbd, 0xd9, 0x8d, 0x61, 0x2b, 0x96, 0xd5, 0x4f, 0x00, 0xa9, 0x22, 0x81, 0xa6, 0x4e, 0xc6, 0x99, + 0x54, 0x97, 0x3a, 0x4b, 0x0d, 0x37, 0x80, 0x14, 0x11, 0x73, 0xa8, 0x8e, 0x08, 0xad, 0x41, 0x1a, + 0xbc, 0x89, 0x25, 0x67, 0xa3, 0x2b, 0x76, 
0x39, 0xe1, 0x04, 0x70, 0x1d, 0x56, 0x6d, 0x20, 0xfd, + 0xc5, 0x4d, 0x53, 0xd2, 0xb0, 0xb2, 0xfd, 0x2b, 0x3e, 0xfa, 0xe9, 0xbb, 0xa9, 0x90, 0xd3, 0x88, + 0x34, 0x75, 0xd9, 0x83, 0x34, 0x30, 0x17, 0x34, 0xe6, 0xf2, 0x98, 0x33, 0x9f, 0x4b, 0xb2, 0x6a, + 0x4f, 0x9f, 0x85, 0x11, 0x17, 0x53, 0x75, 0x22, 0x7e, 0x26, 0x2d, 0x6b, 0x66, 0xc8, 0x99, 0x6f, + 0x7e, 0x61, 0x64, 0xcd, 0x9a, 0x99, 0x21, 0xc6, 0x0c, 0xb1, 0xf5, 0xbe, 0x96, 0xdc, 0x94, 0xb8, + 0x6e, 0xb3, 0xda, 0xbd, 0xd1, 0xe0, 0xf6, 0x6f, 0x0e, 0x6c, 0x3c, 0x36, 0x1e, 0xb8, 0x05, 0xf4, + 0x31, 0x7c, 0x77, 0xaa, 0x04, 0x29, 0xe1, 0xa7, 0xf0, 0xff, 0xc7, 0xd8, 0x6f, 0x44, 0x18, 0xab, + 0xa3, 0x28, 0x99, 0x84, 0xa3, 0x50, 0x5f, 0xc5, 0xc7, 0x64, 0xaf, 0x6e, 0xac, 0xcc, 0xdd, 0xbe, + 0x85, 0xd6, 0xe2, 0x47, 0xa1, 0x9b, 0x31, 0x47, 0x76, 0x7d, 0x5f, 0x8f, 0x3f, 0x29, 0x21, 0x2d, + 0x9a, 0x1d, 0xf2, 0x48, 0x5c, 0x73, 0xc3, 0x38, 0x8b, 0xcc, 0x9b, 0xc4, 0x67, 0x2a, 0x63, 0xdc, + 0xc5, 0x42, 0x76, 0x7d, 0xff, 0x38, 0x7b, 0x7b, 0x0c, 0x5b, 0xde, 0xa3, 0x77, 0x1f, 0xda, 0xa5, + 0x77, 0x1f, 0xda, 0xa5, 0xbb, 0xfb, 0xb6, 0xf3, 0xee, 0xbe, 0xed, 0xfc, 0x7d, 0xdf, 0x76, 0x7e, + 0xff, 0xa7, 0x5d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x87, 0x11, 0x6d, 0xd6, 0xaf, 0x08, 0x00, + 0x00, +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto new file mode 100644 index 000000000..23d62ec2f --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto @@ -0,0 +1,177 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 
[(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit = 2; +} + +message ConfState { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + repeated uint64 voters = 1; + // The learners in the incoming config. + repeated uint64 learners = 2; + // The voters in the outgoing config. + repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + optional bool auto_leave = 5 [(gogoproto.nullable) = false]; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional bytes context = 4; + + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + optional ConfChangeType type = 1 [(gogoproto.nullable) = false]; + optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. 
+// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; + repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; + optional bytes context = 3; +} diff --git a/vendor/go.etcd.io/etcd/raft/rawnode.go b/vendor/go.etcd.io/etcd/raft/rawnode.go new file mode 100644 index 000000000..90eb69493 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/rawnode.go @@ -0,0 +1,239 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// ErrStepLocalMsg is returned when try to step a local raft message +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.prs for that node. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// RawNode is a thread-unsafe Node. +// The methods of this struct correspond to the methods of Node and are described +// more fully there. +type RawNode struct { + raft *raft + prevSoftSt *SoftState + prevHardSt pb.HardState +} + +// NewRawNode instantiates a RawNode from the given configuration. 
+// +// See Bootstrap() for bootstrapping an initial state; this replaces the former +// 'peers' argument to this method (with identical behavior). However, It is +// recommended that instead of calling Bootstrap, applications bootstrap their +// state manually by setting up a Storage that has a first index > 1 and which +// stores the desired ConfState as its InitialState. +func NewRawNode(config *Config) (*RawNode, error) { + r := newRaft(config) + rn := &RawNode{ + raft: r, + } + rn.prevSoftSt = r.softState() + rn.prevHardSt = r.hardState() + return rn, nil +} + +// Tick advances the internal logical clock by a single tick. +func (rn *RawNode) Tick() { + rn.raft.tick() +} + +// TickQuiesced advances the internal logical clock by a single tick without +// performing any other state machine processing. It allows the caller to avoid +// periodic heartbeats and elections when all of the peers in a Raft group are +// known to be at the same state. Expected usage is to periodically invoke Tick +// or TickQuiesced depending on whether the group is "active" or "quiesced". +// +// WARNING: Be very careful about using this method as it subverts the Raft +// state machine. You should probably be using Tick instead. +func (rn *RawNode) TickQuiesced() { + rn.raft.electionElapsed++ +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgHup, + }) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + From: rn.raft.id, + Entries: []pb.Entry{ + {Data: data}, + }}) +} + +// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for +// details. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { + m, err := confChangeToMsg(cc) + if err != nil { + return err + } + return rn.raft.Step(m) +} + +// ApplyConfChange applies a config change to the local node. +func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + cs := rn.raft.applyConfChange(cc.AsV2()) + return &cs +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + return ErrStepLocalMsg + } + if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + return rn.raft.Step(m) + } + return ErrStepPeerNotFound +} + +// Ready returns the outstanding work that the application needs to handle. This +// includes appending and applying entries or a snapshot, updating the HardState, +// and sending messages. The returned Ready() *must* be handled and subsequently +// passed back via Advance(). +func (rn *RawNode) Ready() Ready { + rd := rn.readyWithoutAccept() + rn.acceptReady(rd) + return rd +} + +// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there +// is no obligation that the Ready must be handled. +func (rn *RawNode) readyWithoutAccept() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +// acceptReady is called when the consumer of the RawNode has decided to go +// ahead and handle a Ready. Nothing must alter the state of the RawNode between +// this call and the prior call to Ready(). 
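// exampleReadyLoop is an illustrative sketch (editor-added, not part of the
// upstream file) of the Ready/Advance cycle described above. It assumes the
// usual Ready fields from node.go (HardState, Entries, CommittedEntries,
// Messages) and uses a send callback as a stand-in for the application's
// transport; in real code the loop is driven by a ticker and a notification
// channel rather than busy-polling HasReady.
func exampleReadyLoop(rn *RawNode, st *MemoryStorage, send func([]pb.Message)) {
	for {
		rn.Tick() // advance the logical clock from an application timer
		if !rn.HasReady() {
			continue
		}
		rd := rn.Ready()
		if !IsEmptyHardState(rd.HardState) {
			st.SetHardState(rd.HardState) // persist state before sending messages
		}
		st.Append(rd.Entries) // persist unstable entries
		send(rd.Messages)     // hand outgoing messages to the transport
		// ... apply rd.CommittedEntries to the application state machine ...
		rn.Advance(rd)
	}
}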
+func (rn *RawNode) acceptReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } + rn.raft.msgs = nil +} + +// HasReady called when RawNode user need to check if any Ready pending. +// Checking logic in this method should be consistent with Ready.containsUpdates(). +func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + rn.raft.advance(rd) +} + +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { + status := getStatus(rn.raft) + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. +func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. + ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this node and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. 
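// exampleServeReads is an illustrative sketch (editor-added, not part of the
// upstream file) of the linearizable read flow described above. It assumes
// rn.ReadIndex(rctx) was issued earlier with a unique rctx, and that
// appliedIndex and serve stand in for the application's applied index and
// read handler.
func exampleServeReads(rd Ready, rctx []byte, appliedIndex uint64, serve func()) {
	for _, rs := range rd.ReadStates {
		if string(rs.RequestCtx) == string(rctx) && appliedIndex >= rs.Index {
			// The application has applied at least up to the read index, so the
			// read can be served without violating linearizability.
			serve()
		}
	}
}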
+func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/go.etcd.io/etcd/raft/read_only.go b/vendor/go.etcd.io/etcd/raft/read_only.go new file mode 100644 index 000000000..6987f1bd7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/read_only.go @@ -0,0 +1,121 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "go.etcd.io/etcd/raft/raftpb" + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, it's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. given a unique id as +// RequestCtx +type ReadState struct { + Index uint64 + RequestCtx []byte +} + +type readIndexStatus struct { + req pb.Message + index uint64 + // NB: this never records 'false', but it's more convenient to use this + // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If + // this becomes performance sensitive enough (doubtful), quorum.VoteResult + // can change to an API that is closer to that of CommittedIndex. + acks map[uint64]bool +} + +type readOnly struct { + option ReadOnlyOption + pendingReadIndex map[string]*readIndexStatus + readIndexQueue []string +} + +func newReadOnly(option ReadOnlyOption) *readOnly { + return &readOnly{ + option: option, + pendingReadIndex: make(map[string]*readIndexStatus), + } +} + +// addRequest adds a read only reuqest into readonly struct. +// `index` is the commit index of the raft state machine when it received +// the read only request. +// `m` is the original read only request message from the local or remote node. +func (ro *readOnly) addRequest(index uint64, m pb.Message) { + s := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[s]; ok { + return + } + ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)} + ro.readIndexQueue = append(ro.readIndexQueue, s) +} + +// recvAck notifies the readonly struct that the raft state machine received +// an acknowledgment of the heartbeat that attached with the read only request +// context. +func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool { + rs, ok := ro.pendingReadIndex[string(context)] + if !ok { + return nil + } + + rs.acks[id] = true + return rs.acks +} + +// advance advances the read only request queue kept by the readonly struct. +// It dequeues the requests until it finds the read only request that has +// the same context as the given `m`. 
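// exampleReadOnlyFlow is an illustrative sketch (editor-added, not part of the
// upstream file) of how the leader-side bookkeeping above fits together:
// register a MsgReadIndex under its context, record heartbeat acks for that
// context, and dequeue everything up to it once enough peers have answered.
// The follower id and the quorum check are simplified stand-ins.
func exampleReadOnlyFlow(ro *readOnly, committed uint64, req pb.Message, quorum int) []*readIndexStatus {
	ro.addRequest(committed, req) // req.Entries[0].Data is the request context
	ctx := req.Entries[0].Data
	if acks := ro.recvAck(2, ctx); len(acks) < quorum { // 2: a hypothetical follower id
		return nil
	}
	return ro.advance(pb.Message{Context: ctx})
}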
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { + var ( + i int + found bool + ) + + ctx := string(m.Context) + rss := []*readIndexStatus{} + + for _, okctx := range ro.readIndexQueue { + i++ + rs, ok := ro.pendingReadIndex[okctx] + if !ok { + panic("cannot find corresponding read state from pending map") + } + rss = append(rss, rs) + if okctx == ctx { + found = true + break + } + } + + if found { + ro.readIndexQueue = ro.readIndexQueue[i:] + for _, rs := range rss { + delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) + } + return rss + } + + return nil +} + +// lastPendingRequestCtx returns the context of the last pending read only +// request in readonly struct. +func (ro *readOnly) lastPendingRequestCtx() string { + if len(ro.readIndexQueue) == 0 { + return "" + } + return ro.readIndexQueue[len(ro.readIndexQueue)-1] +} diff --git a/vendor/go.etcd.io/etcd/raft/status.go b/vendor/go.etcd.io/etcd/raft/status.go new file mode 100644 index 000000000..adc60486d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/status.go @@ -0,0 +1,106 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Status contains information about this Raft peer and its view of the system. +// The Progress is only populated on the leader. +type Status struct { + BasicStatus + Config tracker.Config + Progress map[uint64]tracker.Progress +} + +// BasicStatus contains basic information about the Raft peer. It does not allocate. +type BasicStatus struct { + ID uint64 + + pb.HardState + SoftState + + Applied uint64 + + LeadTransferee uint64 +} + +func getProgressCopy(r *raft) map[uint64]tracker.Progress { + m := make(map[uint64]tracker.Progress) + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + var p tracker.Progress + p = *pr + p.Inflights = pr.Inflights.Clone() + pr = nil + + m[id] = p + }) + return m +} + +func getBasicStatus(r *raft) BasicStatus { + s := BasicStatus{ + ID: r.id, + LeadTransferee: r.leadTransferee, + } + s.HardState = r.hardState() + s.SoftState = *r.softState() + s.Applied = r.raftLog.applied + return s +} + +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + var s Status + s.BasicStatus = getBasicStatus(r) + if s.RaftState == StateLeader { + s.Progress = getProgressCopy(r) + } + s.Config = r.prs.Config.Clone() + return s +} + +// MarshalJSON translates the raft status into JSON. 
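// An illustrative example of the output (editor-added sketch, values made up,
// wrapped here for readability; the real output is a single line):
//
//	{"id":"1","term":2,"vote":"1","commit":5,"lead":"1","raftState":"StateLeader","applied":5,
//	 "progress":{"2":{"match":5,"next":6,"state":"StateReplicate"}},"leadtransferee":"0"}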
+// TODO: try to simplify this by introducing ID type into raft +func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied) + + if len(s.Progress) == 0 { + j += "}," + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}," + } + + j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee) + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + raftLogger.Panicf("unexpected error: %v", err) + } + return string(b) +} diff --git a/vendor/go.etcd.io/etcd/raft/storage.go b/vendor/go.etcd.io/etcd/raft/storage.go new file mode 100644 index 000000000..6be574590 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/storage.go @@ -0,0 +1,273 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. +var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. +type Storage interface { + // TODO(tbg): split this into two interfaces, LogStorage and StateStorage. + + // InitialState returns the saved HardState and ConfState information. + InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). + // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. 
The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + } +} + +// InitialState implements the Storage interface. +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. + if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + + ents := ms.ents[lo-offset : hi-offset] + return limitSize(ents, maxSize), nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. 
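// exampleMemoryStorage is an illustrative sketch (editor-added, not part of
// the upstream file) showing how the methods above are typically combined:
// append entries, take a snapshot at an applied index, then compact the log.
// The ConfState and payloads are made-up values.
func exampleMemoryStorage() error {
	ms := NewMemoryStorage()
	if err := ms.Append([]pb.Entry{
		{Index: 1, Term: 1},
		{Index: 2, Term: 1, Data: []byte("payload")},
	}); err != nil {
		return err
	}
	cs := pb.ConfState{Voters: []uint64{1, 2, 3}} // hypothetical three-node configuration
	if _, err := ms.CreateSnapshot(2, &cs, []byte("state-machine-bytes")); err != nil {
		return err
	}
	// Discard entries before index 2; entry 2 becomes the new dummy entry.
	return ms.Compact(2)
}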
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + return nil +} + +// Append the new entries to storage. +// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) + default: + raftLogger.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/inflights.go b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go new file mode 100644 index 000000000..1a056341a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go @@ -0,0 +1,132 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// Inflights limits the number of MsgApp (represented by the largest index +// contained within) sent to followers but not yet acknowledged by them. Callers +// use Full() to check whether more messages can be sent, call Add() whenever +// they are sending a new append, and release "quota" via FreeLE() whenever an +// ack is received. +type Inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +// NewInflights sets up an Inflights that allows up to 'size' inflight messages. +func NewInflights(size int) *Inflights { + return &Inflights{ + size: size, + } +} + +// Clone returns an *Inflights that is identical to but shares no memory with +// the receiver. +func (in *Inflights) Clone() *Inflights { + ins := *in + ins.buffer = append([]uint64(nil), in.buffer...) + return &ins +} + +// Add notifies the Inflights that a new message with the given index is being +// dispatched. Full() must be called prior to Add() to verify that there is room +// for one more message, and consecutive calls to add Add() must provide a +// monotonic sequence of indexes. +func (in *Inflights) Add(inflight uint64) { + if in.Full() { + panic("cannot add into a Full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.grow() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *Inflights) grow() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// FreeLE frees the inflights smaller or equal to the given `to` flight. +func (in *Inflights) FreeLE(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + idx := in.start + var i int + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +// FreeFirstOne releases the first inflight. This is a no-op if nothing is +// inflight. +func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } + +// Full returns true if no more messages can be sent at the moment. +func (in *Inflights) Full() bool { + return in.count == in.size +} + +// Count returns the number of inflight messages. 
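+//
+// Taken together with Full, Add and FreeLE, a leader-side send loop might use
+// the window roughly like this (an illustrative sketch, not upstream code;
+// lastSentIndex and ackedIndex are made-up variables):
+//
+//	in := NewInflights(256)
+//	if !in.Full() {
+//		in.Add(lastSentIndex) // record the append that was just dispatched
+//	}
+//	// ... when an MsgAppResp acking ackedIndex arrives:
+//	in.FreeLE(ackedIndex)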
+func (in *Inflights) Count() int { return in.count } + +// reset frees all inflights. +func (in *Inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/progress.go b/vendor/go.etcd.io/etcd/raft/tracker/progress.go new file mode 100644 index 000000000..62c81f45a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/progress.go @@ -0,0 +1,259 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" +) + +// Progress represents a follower’s progress in the view of the leader. Leader +// maintains progresses of all followers, and sends entries to the follower +// based on its progress. +// +// NB(tbg): Progress is basically a state machine whose transitions are mostly +// strewn around `*raft.raft`. Additionally, some fields are only used when in a +// certain State. All of this isn't ideal. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in StateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in StateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in StateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State StateType + + // PendingSnapshot is used in StateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + // + // TODO(tbg): the leader should always have this set to true. + RecentActive bool + + // ProbeSent is used while this follower is in StateProbe. When ProbeSent is + // true, raft should pause sending replication message to this peer until + // ProbeSent is reset. See ProbeAcked() and IsPaused(). + ProbeSent bool + + // Inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is Full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. 
+ // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.FreeLE with the index of the last + // received entry. + Inflights *Inflights + + // IsLearner is true if this progress is tracked for a learner. + IsLearner bool +} + +// ResetState moves the Progress into the specified State, resetting ProbeSent, +// PendingSnapshot, and Inflights. +func (pr *Progress) ResetState(state StateType) { + pr.ProbeSent = false + pr.PendingSnapshot = 0 + pr.State = state + pr.Inflights.reset() +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +// ProbeAcked is called when this peer has accepted an append. It resets +// ProbeSent to signal that additional append messages should be sent without +// further delay. +func (pr *Progress) ProbeAcked() { + pr.ProbeSent = false +} + +// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or, +// optionally and if larger, the index of the pending snapshot. +func (pr *Progress) BecomeProbe() { + // If the original state is StateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == StateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.ResetState(StateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.ResetState(StateProbe) + pr.Next = pr.Match + 1 + } +} + +// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1. +func (pr *Progress) BecomeReplicate() { + pr.ResetState(StateReplicate) + pr.Next = pr.Match + 1 +} + +// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending +// snapshot index. +func (pr *Progress) BecomeSnapshot(snapshoti uint64) { + pr.ResetState(StateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the +// index acked by it. The method returns false if the given n index comes from +// an outdated message. Otherwise it updates the progress and returns true. +func (pr *Progress) MaybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.ProbeAcked() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +// OptimisticUpdate signals that appends all the way up to and including index n +// are in-flight. As a result, Next is increased to n+1. +func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 } + +// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The +// arguments are the index the follower rejected to append to its log, and its +// last index. +// +// Rejections can happen spuriously as messages are sent out of order or +// duplicated. In such cases, the rejection pertains to an index that the +// Progress already knows were previously acknowledged, and false is returned +// without changing the Progress. +// +// If the rejection is genuine, Next is lowered sensibly, and the Progress is +// cleared for sending log entries. +func (pr *Progress) MaybeDecrTo(rejected, last uint64) bool { + if pr.State == StateReplicate { + // The rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // Directly decrease next to match + 1. + // + // TODO(tbg): why not use last if it's larger? 
+ pr.Next = pr.Match + 1 + return true + } + + // The rejection must be stale if "rejected" does not match next - 1. This + // is because non-replicating followers are probed one entry at a time. + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.ProbeSent = false + return true +} + +// IsPaused returns whether sending log entries to this node has been throttled. +// This is done when a node has rejected recent MsgApps, is currently waiting +// for a snapshot, or has reached the MaxInflightMsgs limit. In normal +// operation, this is false. A throttled node will be contacted less frequently +// until it has reached a state in which it's able to accept a steady stream of +// log entries again. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case StateProbe: + return pr.ProbeSent + case StateReplicate: + return pr.Inflights.Full() + case StateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next) + if pr.IsLearner { + fmt.Fprint(&buf, " learner") + } + if pr.IsPaused() { + fmt.Fprint(&buf, " paused") + } + if pr.PendingSnapshot > 0 { + fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot) + } + if !pr.RecentActive { + fmt.Fprintf(&buf, " inactive") + } + if n := pr.Inflights.Count(); n > 0 { + fmt.Fprintf(&buf, " inflight=%d", n) + if pr.Inflights.Full() { + fmt.Fprint(&buf, "[full]") + } + } + return buf.String() +} + +// ProgressMap is a map of *Progress. +type ProgressMap map[uint64]*Progress + +// String prints the ProgressMap in sorted key order, one Progress per line. +func (m ProgressMap) String() string { + ids := make([]uint64, 0, len(m)) + for k := range m { + ids = append(ids, k) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + var buf strings.Builder + for _, id := range ids { + fmt.Fprintf(&buf, "%d: %s\n", id, m[id]) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/state.go b/vendor/go.etcd.io/etcd/raft/tracker/state.go new file mode 100644 index 000000000..285b4b8f5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/state.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// StateType is the state of a tracked follower. +type StateType uint64 + +const ( + // StateProbe indicates a follower whose last index isn't known. Such a + // follower is "probed" (i.e. an append sent periodically) to narrow down + // its last index. In the ideal (and common) case, only one round of probing + // is necessary as the follower will react with a hint. Followers that are + // probed over extended periods of time are often offline. + StateProbe StateType = iota + // StateReplicate is the state steady in which a follower eagerly receives + // log entries to append to its log. 
+ StateReplicate + // StateSnapshot indicates a follower that needs log entries not available + // from the leader's Raft log. Such a follower needs a full snapshot to + // return to StateReplicate. + StateSnapshot +) + +var prstmap = [...]string{ + "StateProbe", + "StateReplicate", + "StateSnapshot", +} + +func (st StateType) String() string { return prstmap[uint64(st)] } diff --git a/vendor/go.etcd.io/etcd/raft/tracker/tracker.go b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go new file mode 100644 index 000000000..a4581143d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go @@ -0,0 +1,288 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Config reflects the configuration tracked in a ProgressTracker. +type Config struct { + Voters quorum.JointConfig + // AutoLeave is true if the configuration is joint and a transition to the + // incoming configuration should be carried out automatically by Raft when + // this is possible. If false, the configuration will be joint until the + // application initiates the transition manually. + AutoLeave bool + // Learners is a set of IDs corresponding to the learners active in the + // current configuration. + // + // Invariant: Learners and Voters does not intersect, i.e. if a peer is in + // either half of the joint config, it can't be a learner; if it is a + // learner it can't be in either half of the joint config. This invariant + // simplifies the implementation since it allows peers to have clarity about + // its current role without taking into account joint consensus. + Learners map[uint64]struct{} + // When we turn a voter into a learner during a joint consensus transition, + // we cannot add the learner directly when entering the joint state. This is + // because this would violate the invariant that the intersection of + // voters and learners is empty. For example, assume a Voter is removed and + // immediately re-added as a learner (or in other words, it is demoted): + // + // Initially, the configuration will be + // + // voters: {1 2 3} + // learners: {} + // + // and we want to demote 3. Entering the joint configuration, we naively get + // + // voters: {1 2} & {1 2 3} + // learners: {3} + // + // but this violates the invariant (3 is both voter and learner). Instead, + // we get + // + // voters: {1 2} & {1 2 3} + // learners: {} + // next_learners: {3} + // + // Where 3 is now still purely a voter, but we are remembering the intention + // to make it a learner upon transitioning into the final configuration: + // + // voters: {1 2} + // learners: {3} + // next_learners: {} + // + // Note that next_learners is not used while adding a learner that is not + // also a voter in the joint config. In this case, the learner is added + // right away when entering the joint configuration, so that it is caught up + // as soon as possible. 
+ LearnersNext map[uint64]struct{} +} + +func (c Config) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "voters=%s", c.Voters) + if c.Learners != nil { + fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String()) + } + if c.LearnersNext != nil { + fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) + } + if c.AutoLeave { + fmt.Fprintf(&buf, " autoleave") + } + return buf.String() +} + +// Clone returns a copy of the Config that shares no memory with the original. +func (c *Config) Clone() Config { + clone := func(m map[uint64]struct{}) map[uint64]struct{} { + if m == nil { + return nil + } + mm := make(map[uint64]struct{}, len(m)) + for k := range m { + mm[k] = struct{}{} + } + return mm + } + return Config{ + Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])}, + Learners: clone(c.Learners), + LearnersNext: clone(c.LearnersNext), + } +} + +// ProgressTracker tracks the currently active configuration and the information +// known about the nodes and learners in it. In particular, it tracks the match +// index for each peer which in turn allows reasoning about the committed index. +type ProgressTracker struct { + Config + + Progress ProgressMap + + Votes map[uint64]bool + + MaxInflight int +} + +// MakeProgressTracker initializes a ProgressTracker. +func MakeProgressTracker(maxInflight int) ProgressTracker { + p := ProgressTracker{ + MaxInflight: maxInflight, + Config: Config{ + Voters: quorum.JointConfig{ + quorum.MajorityConfig{}, + nil, // only populated when used + }, + Learners: nil, // only populated when used + LearnersNext: nil, // only populated when used + }, + Votes: map[uint64]bool{}, + Progress: map[uint64]*Progress{}, + } + return p +} + +// ConfState returns a ConfState representing the active configuration. +func (p *ProgressTracker) ConfState() pb.ConfState { + return pb.ConfState{ + Voters: p.Voters[0].Slice(), + VotersOutgoing: p.Voters[1].Slice(), + Learners: quorum.MajorityConfig(p.Learners).Slice(), + LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(), + AutoLeave: p.AutoLeave, + } +} + +// IsSingleton returns true if (and only if) there is only one voting member +// (i.e. the leader) in the current configuration. +func (p *ProgressTracker) IsSingleton() bool { + return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0 +} + +type matchAckIndexer map[uint64]*Progress + +var _ quorum.AckedIndexer = matchAckIndexer(nil) + +// AckedIndex implements IndexLookuper. +func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) { + pr, ok := l[id] + if !ok { + return 0, false + } + return quorum.Index(pr.Match), true +} + +// Committed returns the largest log index known to be committed based on what +// the voting members of the group have acknowledged. +func (p *ProgressTracker) Committed() uint64 { + return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress))) +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// Visit invokes the supplied closure for all tracked progresses in stable order. +func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) { + n := len(p.Progress) + // We need to sort the IDs and don't want to allocate since this is hot code. + // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`, + // see there for details. 
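+	// For configurations of up to seven peers the IDs fit in the fixed-size
+	// array below, so the sort runs on a stack-allocated slice; larger
+	// configurations fall back to a regular heap allocation via make.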
+ var sl [7]uint64 + ids := sl[:] + if len(sl) >= n { + ids = sl[:n] + } else { + ids = make([]uint64, n) + } + for id := range p.Progress { + n-- + ids[n] = id + } + insertionSort(ids) + for _, id := range ids { + f(id, p.Progress[id]) + } +} + +// QuorumActive returns true if the quorum is active from the view of the local +// raft state machine. Otherwise, it returns false. +func (p *ProgressTracker) QuorumActive() bool { + votes := map[uint64]bool{} + p.Visit(func(id uint64, pr *Progress) { + if pr.IsLearner { + return + } + votes[id] = pr.RecentActive + }) + + return p.Voters.VoteResult(votes) == quorum.VoteWon +} + +// VoterNodes returns a sorted slice of voters. +func (p *ProgressTracker) VoterNodes() []uint64 { + m := p.Voters.IDs() + nodes := make([]uint64, 0, len(m)) + for id := range m { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// LearnerNodes returns a sorted slice of learners. +func (p *ProgressTracker) LearnerNodes() []uint64 { + if len(p.Learners) == 0 { + return nil + } + nodes := make([]uint64, 0, len(p.Learners)) + for id := range p.Learners { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// ResetVotes prepares for a new round of vote counting via recordVote. +func (p *ProgressTracker) ResetVotes() { + p.Votes = map[uint64]bool{} +} + +// RecordVote records that the node with the given id voted for this Raft +// instance if v == true (and declined it otherwise). +func (p *ProgressTracker) RecordVote(id uint64, v bool) { + _, ok := p.Votes[id] + if !ok { + p.Votes[id] = v + } +} + +// TallyVotes returns the number of granted and rejected Votes, and whether the +// election outcome is known. +func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) { + // Make sure to populate granted/rejected correctly even if the Votes slice + // contains members no longer part of the configuration. This doesn't really + // matter in the way the numbers are used (they're informational), but might + // as well get it right. + for id, pr := range p.Progress { + if pr.IsLearner { + continue + } + v, voted := p.Votes[id] + if !voted { + continue + } + if v { + granted++ + } else { + rejected++ + } + } + result := p.Voters.VoteResult(p.Votes) + return granted, rejected, result +} diff --git a/vendor/go.etcd.io/etcd/raft/util.go b/vendor/go.etcd.io/etcd/raft/util.go new file mode 100644 index 000000000..785cf735d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/util.go @@ -0,0 +1,233 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "bytes" + "fmt" + "strings" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +func DescribeHardState(hs pb.HardState) string { + var buf strings.Builder + fmt.Fprintf(&buf, "Term:%d", hs.Term) + if hs.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", hs.Vote) + } + fmt.Fprintf(&buf, " Commit:%d", hs.Commit) + return buf.String() +} + +func DescribeSoftState(ss SoftState) string { + return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState) +} + +func DescribeConfState(state pb.ConfState) string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave, + ) +} + +func DescribeSnapshot(snap pb.Snapshot) string { + m := snap.Metadata + return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState)) +} + +func DescribeReady(rd Ready, f EntryFormatter) string { + var buf strings.Builder + if rd.SoftState != nil { + fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState)) + buf.WriteByte('\n') + } + if !IsEmptyHardState(rd.HardState) { + fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState)) + buf.WriteByte('\n') + } + if len(rd.ReadStates) > 0 { + fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates) + } + if len(rd.Entries) > 0 { + buf.WriteString("Entries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.Entries, f)) + } + if !IsEmptySnap(rd.Snapshot) { + fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot)) + } + if len(rd.CommittedEntries) > 0 { + buf.WriteString("CommittedEntries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f)) + } + if len(rd.Messages) > 0 { + buf.WriteString("Messages:\n") + for _, msg := range rd.Messages { + fmt.Fprint(&buf, DescribeMessage(msg, f)) + buf.WriteByte('\n') + } + } + if buf.Len() > 0 { + return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String()) + } + return "" +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. 
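+//
+// For example (illustrative output assembled from the format strings below,
+// not a format documented by the package), an append from peer 1 to peer 2
+// might render as:
+//
+//	1->2 MsgApp Term:5 Log:4/10 Commit:9 Entries:[5/11 EntryNormal ""]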
+func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + } + return buf.String() +} + +// PayloadSize is the size of the payload of this Entry. Notably, it does not +// depend on its Index or Term. +func PayloadSize(e pb.Entry) int { + return len(e.Data) +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. +func DescribeEntry(e pb.Entry, f EntryFormatter) string { + if f == nil { + f = func(data []byte) string { return fmt.Sprintf("%q", data) } + } + + formatConfChange := func(cc pb.ConfChangeI) string { + // TODO(tbg): give the EntryFormatter a type argument so that it gets + // a chance to expose the Context. + return pb.ConfChangesToString(cc.AsV2().Changes) + } + + var formatted string + switch e.Type { + case pb.EntryNormal: + formatted = f(e.Data) + case pb.EntryConfChange: + var cc pb.ConfChange + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + case pb.EntryConfChangeV2: + var cc pb.ConfChangeV2 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + } + if formatted != "" { + formatted = " " + formatted + } + return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted) +} + +// DescribeEntries calls DescribeEntry for each Entry, adding a newline to +// each. +func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { + var buf bytes.Buffer + for _, e := range ents { + _, _ = buf.WriteString(DescribeEntry(e, f) + "\n") + } + return buf.String() +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} + +func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { + err := cs1.Equivalent(cs2) + if err == nil { + return + } + l.Panic(err) +} diff --git a/vendor/go.etcd.io/etcd/version/version.go b/vendor/go.etcd.io/etcd/version/version.go new file mode 100644 index 000000000..ee97e461e --- /dev/null +++ b/vendor/go.etcd.io/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. 
+package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.4.13" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..0a4504f11 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..0f3769e5f --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - test -n "$NO_TEST" || make test_ci + - test -n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1ef263075 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,51 @@ +# Many Go tools take file globs or directories as arguments instead of packages. +PACKAGE_FILES ?= *.go + +# For pre go1.6 +export GO15VENDOREXPERIMENT=1 + + +.PHONY: build +build: + go build -i ./... + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: test +test: + go test -cover -race ./... + + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: install_lint +install_lint: + go get golang.org/x/lint/golint + + +.PHONY: lint +lint: + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @go vet ./... 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @golint $$(go list ./...) 2>&1 | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @[ ! -s lint.log ] + + +.PHONY: test_ci +test_ci: install_ci build + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..62eb8e576 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,36 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation +`go get -u go.uber.org/atomic` + +## Usage +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status +Stable. + +___ +Released under the [MIT License](LICENSE.txt). + +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 000000000..1db6849fc --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. 
+func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. 
+func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. +func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..0489d19ba --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. +func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 000000000..3c72c5997 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 000000000..4cf608ec0 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..ede8136fa --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. +func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 000000000..61ead8666 --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1 @@ +/vendor diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml new file mode 100644 index 000000000..5ffa8fed4 --- /dev/null +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -0,0 +1,33 @@ +sudo: false +language: go +go_import_path: go.uber.org/multierr + +env: + global: + - GO15VENDOREXPERIMENT=1 + +go: + - 1.7 + - 1.8 + - tip + +cache: + directories: + - vendor + +before_install: +- go version + +install: +- | + set -e + make install_ci + +script: +- | + set -e + make lint + make test_ci + +after_success: +- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 000000000..898445d06 --- /dev/null +++ 
b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,28 @@ +Releases +======== + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 000000000..a7437d061 --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,74 @@ +export GO15VENDOREXPERIMENT=1 + +PACKAGES := $(shell glide nv) + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + +.PHONY: build +build: + go build -i $(PACKAGES) + +.PHONY: test +test: + go test -cover -race $(PACKAGES) + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: govet +govet: + $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) + @go vet $(PACKAGES) 2>&1 \ + | grep -v '^exit status' > $(VET_LOG) || true + @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) + +.PHONY: golint +golint: + @go get github.com/golang/lint/golint + $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) + @cat /dev/null > $(LINT_LOG) + @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) + @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) + +.PHONY: staticcheck +staticcheck: + @go get honnef.co/go/tools/cmd/staticcheck + $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) + @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true + @[ ! 
-s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) + +.PHONY: lint +lint: gofmt govet golint staticcheck + +.PHONY: cover +cover: + ./scripts/cover.sh $(shell go list $(PACKAGES)) + go tool cover -html=cover.out -o cover.html + +update-license: + @go get go.uber.org/tools/update-license + @update-license \ + $(shell go list -json $(PACKAGES) | \ + jq -r '.Dir + "/" + (.GoFiles | .[])') + +############################################################################## + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: test_ci +test_ci: install_ci + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 000000000..065088f64 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.org/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..de6ce4736 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,401 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. 
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:")
+// }
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ _newline = []byte("\n")
+
+ // Prefix for multi-line messages
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, the returned slice is empty.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ errors := eg.Errors()
+ result := make([]error, len(errors))
+ copy(result, errors)
+ return result
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened.
That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. 
+// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. 
+ errors := [2]error{left, right} + return fromSlice(errors[0:]) +} diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock new file mode 100644 index 000000000..f9ea94c33 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.lock @@ -0,0 +1,19 @@ +hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 +updated: 2017-04-10T13:34:45.671678062-07:00 +imports: +- name: go.uber.org/atomic + version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb +testImports: +- name: github.com/davecgh/go-spew + version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml new file mode 100644 index 000000000..6ef084ec2 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/multierr +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 000000000..8e5ca7d3e --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 000000000..08fbde6ce --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 000000000..c6440db8e --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,108 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. 
+ "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml new file mode 100644 index 000000000..ada5ebdcc --- /dev/null +++ b/vendor/go.uber.org/zap/.travis.yml @@ -0,0 +1,21 @@ +language: go +sudo: false +go: + - 1.11.x + - 1.12.x +go_import_path: go.uber.org/zap +env: + global: + - TEST_TIMEOUT_SCALE=10 +cache: + directories: + - vendor +install: + - make dependencies +script: + - make lint + - make test + - make bench +after_success: + - make cover + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 000000000..28d10677e --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,327 @@ +# Changelog + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. 
+ +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). 
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. 
Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e327d9aa5 --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body 
size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 000000000..9454bbaf0 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. 
In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Install zap's dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 000000000..4256d35c7 --- /dev/null +++ b/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,155 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? + +Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and +`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points +out][go-proverbs], "The bigger the interface, the weaker the abstraction." +Interfaces are also rigid — *any* change requires releasing a new major +version, since it breaks all third-party implementations. 
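+
+For illustration, a minimal sketch of the kind of narrow, application-owned
+interface that the next paragraph recommends; the `Logger` name and the
+two-method set here are illustrative assumptions rather than part of zap's
+exported API, though `*zap.SugaredLogger` does provide these methods:
+
+```go
+package mylog // hypothetical application package
+
+import "go.uber.org/zap"
+
+// Logger covers only the logging methods this application actually calls.
+type Logger interface {
+	Infow(msg string, keysAndValues ...interface{})
+	Errorw(msg string, keysAndValues ...interface{})
+}
+
+// Compile-time check: *zap.SugaredLogger satisfies the narrow interface.
+var _ Logger = (*zap.SugaredLogger)(nil)
+```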
+ +Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much +abstraction, and it lets us add methods without introducing breaking changes. +Your applications should define and depend upon an interface that includes +just the methods you use. + +### Why sample application logs? + +Applications often experience runs of errors, either because of a bug or +because of a misbehaving user. Logging errors is usually a good idea, but it +can easily make this bad situation worse: not only is your application coping +with a flood of errors, it's also spending extra CPU cycles and I/O logging +those errors. Since writes are typically serialized, logging limits throughput +when you need it most. + +Sampling fixes this problem by dropping repetitive log entries. Under normal +conditions, your application writes out every entry. When similar entries are +logged hundreds or thousands of times each second, though, zap begins dropping +duplicates to preserve throughput. + +### Why do the structured logging APIs take a message in addition to fields? + +Subjectively, we find it helpful to accompany structured context with a brief +description. This isn't critical during development, but it makes debugging +and operating unfamiliar systems much easier. + +More concretely, zap's sampling algorithm uses the message to identify +duplicate entries. In our experience, this is a practical middle ground +between random sampling (which often drops the exact entry that you need while +debugging) and hashing the complete entry (which is prohibitively expensive). + +### Why include package-global loggers? + +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. 
+ +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 000000000..6652bed45 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 000000000..073e9aa91 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,76 @@ +export GO15VENDOREXPERIMENT=1 + +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem +PKGS ?= $(shell glide novendor) +# Many Go tools take file globs or directories as arguments instead of packages. 
+PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest + +# The linting tools evolve with each Go version, so run them only on the latest +# stable release. +GO_VERSION := $(shell go version | cut -d " " -f 3) +GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) +LINTABLE_MINOR_VERSIONS := 12 +ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) +SHOULD_LINT := true +endif + + +.PHONY: all +all: lint test + +.PHONY: dependencies +dependencies: + @echo "Installing Glide and locked dependencies..." + glide --version || go get -u -f github.com/Masterminds/glide + glide install + @echo "Installing test dependencies..." + go install ./vendor/github.com/axw/gocov/gocov + go install ./vendor/github.com/mattn/goveralls +ifdef SHOULD_LINT + @echo "Installing golint..." + go install ./vendor/github.com/golang/lint/golint +else + @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) +endif + +# Disable printf-like invocation checking due to testify.assert.Error() +VET_RULES := -printf=false + +.PHONY: lint +lint: +ifdef SHOULD_LINT + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log + @echo "Installing test dependencies for vet..." + @go test -i $(PKGS) + @echo "Checking vet..." + @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./check_license.sh | tee -a lint.log + @[ ! -s lint.log ] +else + @echo "Skipping linters on" $(GO_VERSION) +endif + +.PHONY: test +test: + go test -race $(PKGS) + +.PHONY: cover +cover: + ./scripts/cover.sh $(PKGS) + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 000000000..f4fd1cb44 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,136 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. 
+ zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 3131 ns/op | 5 allocs/op | +| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | +| zerolog | 16154 ns/op | 90 allocs/op | +| lion | 16341 ns/op | 111 allocs/op | +| go-kit | 17049 ns/op | 126 allocs/op | +| logrus | 23662 ns/op | 142 allocs/op | +| log15 | 36351 ns/op | 149 allocs/op | +| apex/log | 42530 ns/op | 126 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 380 ns/op | 0 allocs/op | +| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | +| zerolog | 321 ns/op | 0 allocs/op | +| lion | 7092 ns/op | 39 allocs/op | +| go-kit | 20226 ns/op | 115 allocs/op | +| logrus | 22312 ns/op | 130 allocs/op | +| log15 | 28788 ns/op | 79 allocs/op | +| apex/log | 42063 ns/op | 115 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 361 ns/op | 0 allocs/op | +| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | +| zerolog | 323 ns/op | 0 allocs/op | +| standard library | 575 ns/op | 2 allocs/op | +| go-kit | 922 ns/op | 13 allocs/op | +| lion | 1413 ns/op | 10 allocs/op | +| logrus | 2291 ns/op | 27 allocs/op | +| apex/log | 3690 ns/op | 11 allocs/op | +| log15 | 5954 ns/op | 26 allocs/op | + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
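+As a small sketch of the choice described in the Performance section above,
+the same logger can be used through both APIs, so allocation-sensitive paths
+can drop down to the strongly typed `Logger` while the rest of the code keeps
+the `SugaredLogger` convenience (the messages and field values below are
+illustrative, not from this repository):
+
+```go
+package main
+
+import "go.uber.org/zap"
+
+func main() {
+	logger, _ := zap.NewProduction()
+	defer logger.Sync()
+
+	sugar := logger.Sugar()
+	sugar.Infow("cache warmed", "entries", 128)
+
+	// Desugar returns the underlying *zap.Logger for hot paths.
+	hot := sugar.Desugar()
+	hot.Info("request served", zap.Int("status", 200))
+}
+```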
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 000000000..5be3704a3 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 000000000..7592e8c63 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,115 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import "strconv" + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) 
+ return len(bs), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 000000000..8fb3e202c --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/check_license.sh new file mode 100644 index 000000000..345ac8b89 --- /dev/null +++ b/vendor/go.uber.org/zap/check_license.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 000000000..6fe17d9e0 --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,243 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// Values configured here are per-second. See zapcore.NewSampler for details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. 
Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 000000000..8638dd1b9 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. 
+// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. 
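+//
+// For example, a custom encoding can be registered by name and then selected
+// via Config.Encoding. This is only a sketch: the "bson" name and the
+// newBSONEncoder constructor are hypothetical, not part of zap.
+//  zap.RegisterEncoder("bson", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
+//      return newBSONEncoder(cfg), nil // assumed constructor for the custom encoder
+//  })
+//  cfg := zap.NewProductionConfig()
+//  cfg.Encoding = "bson"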
+// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 000000000..2e9d3c341 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,75 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 000000000..65982a51e --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. 
+ elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 000000000..5130e1347 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,310 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). 
+func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. 
+// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace()) +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 000000000..131287507 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. 
The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock new file mode 100644 index 000000000..881b462c0 --- /dev/null +++ b/vendor/go.uber.org/zap/glide.lock @@ -0,0 +1,76 @@ +hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 +updated: 2017-07-22T18:06:49.598185334-07:00 +imports: +- name: go.uber.org/atomic + version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf +- name: go.uber.org/multierr + version: 3c4937480c32f4c13a875a1829af76c98ca3d40a +testImports: +- name: github.com/apex/log + version: d9b960447bfa720077b2da653cc79e533455b499 + subpackages: + - handlers/json +- name: github.com/axw/gocov + version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 + subpackages: + - gocov +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew +- name: github.com/fatih/color + version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 +- name: github.com/go-kit/kit + version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d + subpackages: + - log +- name: github.com/go-logfmt/logfmt + version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 +- name: github.com/go-stack/stack + version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b +- name: github.com/golang/lint + version: c5fb716d6688a859aae56d26d3e6070808df29f7 + subpackages: + - golint +- name: github.com/kr/logfmt + version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 +- name: github.com/mattn/go-colorable + version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a +- name: github.com/mattn/go-isatty + version: fc9e8d8ef48496124e79ae0df75490096eccf6fe +- name: github.com/mattn/goveralls + version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 +- name: github.com/pborman/uuid + version: e790cca94e6cc75c7064b1332e63811d4aae1a53 +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/rs/zerolog + version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 +- name: github.com/satori/go.uuid + version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b +- name: github.com/sirupsen/logrus + version: 7dd06bf38e1e13df288d471a57d5adbac106be9e +- name: github.com/stretchr/testify + version: f6abca593680b2315d2075e0f5e2a9751e3f431a + subpackages: + - assert + - require +- name: go.pedge.io/lion + version: 87958e8713f1fa138d993087133b97e976642159 +- name: golang.org/x/sys + version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f + subpackages: + - unix +- name: golang.org/x/tools + version: 496819729719f9d07692195e0a94d6edd2251389 + subpackages: + - cover +- name: gopkg.in/inconshreveable/log15.v2 + version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f + subpackages: + - stack + - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 000000000..94412594c --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,35 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- 
package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: go.pedge.io/lion +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: github.com/golang/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 000000000..c1ac0507c --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,168 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. 
+func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go new file mode 100644 index 000000000..6b5dbda80 --- /dev/null +++ b/vendor/go.uber.org/zap/global_go112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build go1.12 + +package zap + +const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go new file mode 100644 index 000000000..d3ab9af93 --- /dev/null +++ b/vendor/go.uber.org/zap/global_prego112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build !go1.12 + +package zap + +const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 000000000..1b0ecaca9 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET requests return a JSON description of the current logging level. PUT
+// requests change the logging level and expect a payload like:
+//   {"level":"info"}
+//
+// It's perfectly safe to change the logging level while a program is running.
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	type errorResponse struct {
+		Error string `json:"error"`
+	}
+	type payload struct {
+		Level *zapcore.Level `json:"level"`
+	}
+
+	enc := json.NewEncoder(w)
+
+	switch r.Method {
+
+	case http.MethodGet:
+		current := lvl.Level()
+		enc.Encode(payload{Level: &current})
+
+	case http.MethodPut:
+		var req payload
+
+		if errmess := func() string {
+			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+				return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+			}
+			if req.Level == nil {
+				return "Must specify a logging level."
+			}
+			return ""
+		}(); errmess != "" {
+			w.WriteHeader(http.StatusBadRequest)
+			enc.Encode(errorResponse{Error: errmess})
+			return
+		}
+
+		lvl.SetLevel(*req.Level)
+		enc.Encode(req)
+
+	default:
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		enc.Encode(errorResponse{
+			Error: "Only GET and PUT are supported.",
+		})
+	}
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 000000000..dad583aaa
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffers.NewPool.
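// Usage sketch (illustrative only, not part of the vendored zap source): serving the
// AtomicLevel handler above so the level can be changed while the process runs,
// e.g. curl -X PUT -d '{"level":"debug"}' localhost:8080/log/level. The route and
// port are assumptions for the example.
package main

import (
	"net/http"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zap.InfoLevel)
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		atom, // the same AtomicLevel gates every entry
	)
	logger := zap.New(core)
	defer logger.Sync()

	// AtomicLevel implements http.Handler: GET reports the level, PUT changes it.
	http.Handle("/log/level", atom)
	logger.Info("serving level endpoint")
	_ = http.ListenAndServe(":8080", nil)
}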
+package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 000000000..c4d5d02ab --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 000000000..dfc5b05fe --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). 
+package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 000000000..3567a9a1e --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. 
+type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 000000000..dc8f6e3a4 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,305 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
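// Usage sketch (illustrative only, not part of the vendored zap source): the output
// split mentioned in the LevelEnablerFunc comment, sending error-and-above to
// stderr and everything else to stdout via zapcore.NewTee. The JSON encoder choice
// is an assumption for the example.
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	highPriority := zap.LevelEnablerFunc(func(l zapcore.Level) bool { return l >= zapcore.ErrorLevel })
	lowPriority := zap.LevelEnablerFunc(func(l zapcore.Level) bool { return l < zapcore.ErrorLevel })

	core := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.Lock(os.Stderr), highPriority),
		zapcore.NewCore(enc, zapcore.Lock(os.Stdout), lowPriority),
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("routed to stdout")
	logger.Error("routed to stderr")
}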
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + name string + errorOutput zapcore.WriteSyncer + + addCaller bool + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. 
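// Usage sketch (illustrative only, not part of the vendored zap source): the Check
// pattern described above, paying for expensive field construction only when the
// entry will actually be written. The helper name and the render callback are
// assumptions for the example.
package main

import "go.uber.org/zap"

func logPayload(logger *zap.Logger, render func() string) {
	if ce := logger.Check(zap.DebugLevel, "dumping payload"); ce != nil {
		// render() runs only when debug logging is enabled.
		ce.Write(zap.String("payload", render()))
	}
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	logPayload(logger, func() string { return "expensive to build" })
}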
+func (log *Logger) Panic(msg string, fields ...Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		ce = ce.Should(ent, zapcore.WriteThenFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+	if log.addCaller {
+		ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
+		if !ce.Entry.Caller.Defined {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+			log.errorOutput.Sync()
+		}
+	}
+	if log.addStack.Enabled(ce.Entry.Level) {
+		ce.Entry.Stack = Stack("").String
+	}
+
+	return ce
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 000000000..7a6b0fca1
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "go.uber.org/zap/zapcore" + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller. +func AddCaller() Option { + return optionFunc(func(log *Logger) { + log.addCaller = true + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 000000000..ff0becfe5 --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
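// Usage sketch (illustrative only, not part of the vendored zap source): combining
// several Options at construction time and using AddCallerSkip so a small wrapper
// reports its caller rather than itself. The "service" field and the wrapper name
// are assumptions for the example.
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func newLogger() *zap.Logger {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zap.InfoLevel,
	)
	return zap.New(core,
		zap.AddCaller(),                             // annotate entries with file:line
		zap.AddStacktrace(zap.ErrorLevel),           // stack traces for error and above
		zap.Fields(zap.String("service", "router")), // attached to every entry
	)
}

// logThrough is a wrapper; AddCallerSkip(1) keeps the caller annotation on the
// code that calls logThrough, not on logThrough itself.
func logThrough(l *zap.Logger, msg string) {
	l.WithOptions(zap.AddCallerSkip(1)).Info(msg)
}

func main() {
	l := newLogger()
	defer l.Sync()
	logThrough(l, "hello from the wrapper")
}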
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. 
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' || c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 000000000..100fac216 --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,126 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
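// Usage sketch (illustrative only, not part of the vendored zap source): registering
// a custom sink scheme and pointing a Config's OutputPaths at it. The "memory"
// scheme and the buffer-backed sink are assumptions for the example.
package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// memorySink captures log output in memory; Sync and Close are no-ops.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func main() {
	sink := &memorySink{}
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("captured")
	_ = logger.Sync()

	fmt.Print(sink.String()) // the JSON entry written above
}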
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "strings" + "sync" + + "go.uber.org/zap/internal/bufferpool" +) + +const _zapPackage = "go.uber.org/zap" + +var ( + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } + + // We add "." and "/" suffixes to the package name to ensure we only match + // the exact package and not any package with the same prefix. + _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") + _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) +) + +func takeStacktrace() string { + buffer := bufferpool.Get() + defer buffer.Free() + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var numFrames int + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + numFrames = runtime.Callers(2, programCounters.pcs) + if numFrames < len(programCounters.pcs) { + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + skipZapFrames := true // skip all consecutive zap frames at the beginning. + frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if skipZapFrames && isZapFrame(frame.Function) { + continue + } else { + skipZapFrames = false + } + + if i != 0 { + buffer.AppendByte('\n') + } + i++ + buffer.AppendString(frame.Function) + buffer.AppendByte('\n') + buffer.AppendByte('\t') + buffer.AppendString(frame.File) + buffer.AppendByte(':') + buffer.AppendInt(int64(frame.Line)) + } + + return buffer.String() +} + +func isZapFrame(function string) bool { + for _, prefix := range _zapStacktracePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + + // We can't use a prefix match here since the location of the vendor + // directory affects the prefix. Instead we do a contains match. + for _, contains := range _zapStacktraceVendorContains { + if strings.Contains(function, contains) { + return true + } + } + + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} + +func addPrefix(prefix string, ss ...string) []string { + withPrefix := make([]string, len(ss)) + for i, s := range ss { + withPrefix[i] = prefix + s + } + return withPrefix +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 000000000..77ca227f4 --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,304 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. 
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + // Format with Sprint, Sprintf, or neither. + msg := template + if msg == "" && len(fmtArgs) > 0 { + msg = fmt.Sprint(fmtArgs...) + } else if msg != "" && len(fmtArgs) > 0 { + msg = fmt.Sprintf(template, fmtArgs...) + } + + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. 
+ if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 000000000..c5a1f1622 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 000000000..86a709ab0 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,99 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
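// Usage sketch (illustrative only, not part of the vendored zap source): the three
// SugaredLogger calling styles (print-like, printf-like, and loosely typed
// key-value pairs) plus Desugar to recover the strongly typed Logger. The field
// names and values are assumptions for the example.
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	sugar.Info("plain message")
	sugar.Infof("retrying in %d seconds", 5)
	sugar.Infow("request failed",
		"url", "https://example.com", // keys in Infow/With pairs must be strings
		"attempt", 3,
		"err", errors.New("timeout"),
	)

	// Back to the allocation-conscious, strongly typed API.
	logger = sugar.Desugar()
	logger.Info("typed again", zap.Int("attempt", 3))
}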
+ +package zap + +import ( + "fmt" + "io" + "io/ioutil" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 000000000..b7875966f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,147 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
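// Usage sketch (illustrative only, not part of the vendored zap source): Open
// combining stderr and a log file into one locked WriteSyncer, with the returned
// cleanup function closing the file. The /tmp path is an assumption for the example.
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws, cleanup, err := zap.Open("stderr", "/tmp/router.log")
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws, // already a single, locked WriteSyncer
		zap.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("written to stderr and /tmp/router.log")
}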
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + for i := range arr.elems { + if i > 0 { + line.AppendByte('\t') + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addTabIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. 
+ if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer context.buf.Free() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addTabIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendByte('\t') + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 000000000..a1ef8b034 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. 
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 000000000..31000e91f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 000000000..f0509522b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,348 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. 
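The textual UnmarshalText hooks above exist so that encoder choices can come from configuration. A small hypothetical sketch (not part of the vendored file) that parses two encoder names and feeds them to the console encoder defined earlier in this patch; keys and the message are arbitrary:

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Pick concrete encoders from their textual names, as a config file would.
	var lvlEnc zapcore.LevelEncoder
	_ = lvlEnc.UnmarshalText([]byte("capital")) // -> CapitalLevelEncoder; never returns an error

	var timeEnc zapcore.TimeEncoder
	_ = timeEnc.UnmarshalText([]byte("iso8601")) // -> ISO8601TimeEncoder

	enc := zapcore.NewConsoleEncoder(zapcore.EncoderConfig{
		MessageKey:  "msg",
		LevelKey:    "level",
		TimeKey:     "ts",
		EncodeLevel: lvlEnc,
		EncodeTime:  timeEnc,
	})

	buf, err := enc.EncodeEntry(zapcore.Entry{
		Level:   zapcore.WarnLevel,
		Time:    time.Now(),
		Message: "certificate expires soon",
	}, nil)
	if err != nil {
		panic(err)
	}
	// Tab-separated plain text, e.g.: 2020-12-11T10:53:59.000-0500	WARN	certificate expires soon
	fmt.Print(buf.String())
	buf.Free()
}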
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) +} + +// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are +// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to +// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. 
+func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. + AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it's slow + // and allocation-heavy. 
+ AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 000000000..7d9893f33 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,257 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. 
+type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 000000000..a67c7bacc --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,120 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) error { + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. + Errors() []error +} + +type causer interface { + // Provides access to the error that caused this error. + Cause() error +} + +// Note that errArry and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + arr.AppendObject(el) + el.Free() + } + return nil +} + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. 
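For a sense of what the error-encoding path above produces, a tiny hypothetical sketch (not part of the vendored file): it routes a plain error through a Field of ErrorType (the Field type is defined in the next file of this patch) into the map-backed MapObjectEncoder that appears near the end.

package main

import (
	"errors"
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	err := errors.New("connection refused")

	// An ErrorType Field is dispatched to the error-encoding logic above.
	f := zapcore.Field{Key: "error", Type: zapcore.ErrorType, Interface: err}

	enc := zapcore.NewMapObjectEncoder()
	f.AddTo(enc)
	fmt.Println(enc.Fields) // map[error:connection refused]
}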
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get().(*errArrayElem) + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 000000000..ae772e4a1 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,212 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time. + TimeType + // Uint64Type indicates that the field carries a uint64. 
+ Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + err = encodeStringer(f.Key, f.Interface, enc) + case ErrorType: + encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
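A small hypothetical sketch (not part of the vendored file) of how the Fields defined above carry values: the higher-level zap package normally constructs them, but they can be built by hand and exercised against the MapObjectEncoder from the end of this patch. The key names and values here are arbitrary.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	fields := []zapcore.Field{
		{Key: "route", Type: zapcore.StringType, String: "default"},
		{Key: "retries", Type: zapcore.Int64Type, Integer: 3},
		{Key: "tls", Type: zapcore.BoolType, Integer: 1}, // Integer == 1 encodes true
	}

	enc := zapcore.NewMapObjectEncoder()
	for _, f := range fields {
		f.AddTo(enc)
	}
	fmt.Println(enc.Fields) // map[retries:3 route:default tls:true]
}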
+func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { + defer func() { + if v := recover(); v != nil { + err = fmt.Errorf("PANIC=%v", v) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 000000000..5db4afb30 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. 
+ var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 000000000..9aec4eada --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,505 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. 
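A short hypothetical sketch (not part of the vendored file) showing the kind of line the JSON encoder below emits; the key names and field values are arbitrary.

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
		MessageKey:  "msg",
		LevelKey:    "level",
		TimeKey:     "ts",
		EncodeLevel: zapcore.LowercaseLevelEncoder,
		EncodeTime:  zapcore.EpochTimeEncoder,
	})

	buf, err := enc.EncodeEntry(
		zapcore.Entry{Level: zapcore.ErrorLevel, Time: time.Now(), Message: "backend unreachable"},
		[]zapcore.Field{{Key: "backend", Type: zapcore.StringType, String: "be_http"}},
	)
	if err != nil {
		panic(err)
	}
	// e.g. {"level":"error","ts":1607701739.5,"msg":"backend unreachable","backend":"be_http"}
	fmt.Print(buf.String())
	buf.Free()
}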
+func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + + // For consistency with our custom JSON encoder. + enc.reflectEnc.SetEscapeHTML(false) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addKey(key) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. 
+ enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. + enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addElementSeparator() + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + 
clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined && final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. 
+func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. +func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 000000000..e575c9f43 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,175 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. 
If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. 
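As a quick hypothetical sketch (not part of the vendored file) of the level semantics described above:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Parse a level from configuration text; the zero value is InfoLevel.
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		panic(err)
	}

	// A Level is itself a LevelEnabler: it admits itself and anything more severe.
	fmt.Println(lvl.Enabled(zapcore.DebugLevel)) // false
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true
}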
+type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 000000000..7af8dadcb --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 000000000..2627a653d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
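The Level plumbing vendored above (String, UnmarshalText, Enabled) is normally driven from configuration. The following sketch is not part of this patch; it only illustrates, under the assumption of a plain main package and a hard-coded "warn" string, how calling code typically parses and checks a level:

    package main

    import (
        "fmt"

        "go.uber.org/zap/zapcore"
    )

    func main() {
        // Parse a level name as it would appear in a YAML/JSON config file.
        var lvl zapcore.Level
        if err := lvl.UnmarshalText([]byte("warn")); err != nil {
            panic(err)
        }
        // Enabled reports whether a message at the given level would be logged.
        fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: Error >= Warn
        fmt.Println(lvl.Enabled(zapcore.DebugLevel)) // false: Debug < Warn
    }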
+ +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 000000000..dfead0829 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
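ObjectMarshaler from marshaler.go and the MapObjectEncoder above are usually paired in tests: a type describes itself through MarshalLogObject, and the map-backed encoder captures the result for assertions. The user type, its fields, and the literal values in this sketch are invented for illustration; none of it comes from this repository:

    package main

    import (
        "fmt"

        "go.uber.org/zap/zapcore"
    )

    // user is a hypothetical type that adds itself to the logging context.
    type user struct {
        Name string
        Age  int
    }

    func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
        enc.AddString("name", u.Name)
        enc.AddInt("age", u.Age)
        return nil
    }

    func main() {
        enc := zapcore.NewMapObjectEncoder()
        if err := enc.AddObject("user", user{Name: "alice", Age: 42}); err != nil {
            panic(err)
        }
        // Fields now holds the nested map written by MarshalLogObject.
        fmt.Println(enc.Fields["user"]) // map[age:42 name:alice]
    }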
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 000000000..e31641863 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,134 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 +} + +// NewSampler creates a Core that samples incoming entries, which caps the CPU +// and I/O load of logging while attempting to preserve a representative subset +// of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + } +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + return ce + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 000000000..07a32eef9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 000000000..209e25fe2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + // Copy to protect against https://github.com/golang/go/issues/7809 + return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..d3596ee66 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,752 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. 
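Stepping back to write_syncer.go above: AddSync, Lock, and NewMultiWriteSyncer are how plain io.Writers become safe zap outputs. A minimal sketch, not part of the vendored code, wiring an in-memory buffer and os.Stdout together:

    package main

    import (
        "bytes"
        "os"

        "go.uber.org/zap/zapcore"
    )

    func main() {
        var buf bytes.Buffer

        // AddSync gives *bytes.Buffer a no-op Sync; *os.File already has one.
        // Lock makes each WriteSyncer safe for concurrent writes.
        ws := zapcore.NewMultiWriteSyncer(
            zapcore.Lock(zapcore.AddSync(&buf)),
            zapcore.Lock(os.Stdout),
        )

        _, _ = ws.Write([]byte("hello\n"))
        _ = ws.Sync()
    }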
+func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. 
+func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
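To make the Builder/String pairing concrete, here is a small round trip, purely illustrative and not part of the vendored files, that DER-encodes an INTEGER with AddASN1Int64 and reads it back with ReadASN1Integer (the value 42 is arbitrary):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder
        b.AddASN1Int64(42) // tag INTEGER, minimal two's-complement contents
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }

        var got int64
        s := cryptobyte.String(der)
        if !s.ReadASN1Integer(&got) || !s.Empty() {
            panic("failed to parse INTEGER")
        }
        fmt.Println(got) // 42
    }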
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. 
+ return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 000000000..cda8e3edf --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..ca7b1db5c --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. 
+func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. 
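The BuilderContinuation comment above shows the write side of length-prefixed data; the sketch below, again illustrative rather than vendored, adds the matching read side using the String helpers defined later in string.go:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder
        b.AddUint8LengthPrefixed(func(child *cryptobyte.Builder) {
            child.AddUint16(0xbeef)
        })
        msg := b.BytesOrPanic() // 0x02 0xbe 0xef

        s := cryptobyte.String(msg)
        var body cryptobyte.String
        var v uint16
        if !s.ReadUint8LengthPrefixed(&body) || !body.ReadUint16(&v) {
            panic("malformed message")
        }
        fmt.Printf("%#x\n", v) // 0xbeef
    }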
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
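+ // Shift the bytes already written by the child forward by extraBytes so the
+ // remaining length octets can sit directly after the initial length byte.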
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 000000000..589d297e6 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. 
It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. 
It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 000000000..f38797bfa --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go new file mode 100644 index 000000000..0cc4a8a64 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 000000000..a98d1bd45 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. 
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if subtle.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if subtle.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go new file mode 100644 index 000000000..157a69f61 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package poly1305 + +// Generic fallbacks for the math/bits intrinsics, copied from +// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had +// variable time fallbacks until Go 1.13. 
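The Seal and Open functions above are essentially the whole public surface of the secretbox package, so a short usage sketch may help. The following is an illustration only, not part of the vendored files; it assumes the importer can reach golang.org/x/crypto/nacl/secretbox, draws a random 24-byte nonce per message, prepends it to the box, and splits it back off before opening, which is one common way to satisfy the nonce-uniqueness requirement stated in the package comment.

package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	// 32-byte secret key shared by both sides (normally derived or exchanged securely).
	var key [32]byte
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		panic(err)
	}

	// A fresh 24-byte nonce per message; random nonces are long enough that
	// collisions are negligible, as the package comment notes.
	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		panic(err)
	}

	msg := []byte("small message, well under the suggested 16KB chunk size")

	// Seal appends to its first argument, so passing nonce[:] yields
	// nonce || ciphertext in one slice; the box itself is
	// len(msg)+secretbox.Overhead bytes.
	box := secretbox.Seal(nonce[:], msg, &nonce, &key)

	// Receiver side: split the nonce back off, then authenticate and decrypt.
	var recvNonce [24]byte
	copy(recvNonce[:], box[:24])
	plain, ok := secretbox.Open(nil, box[24:], &recvNonce, &key)
	if !ok {
		panic("secretbox: authentication failed")
	}
	fmt.Printf("%s\n", plain)
}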
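Both Seal and Open above reject in-place use via subtle.AnyOverlap, and the distinction between AnyOverlap and InexactOverlap earlier in this diff is easiest to see on concrete slices. Because golang.org/x/crypto/internal/subtle is an internal package, the sketch below copies the two checks locally under lowercase names purely for illustration; it is not part of the vendored code.

package main

import (
	"fmt"
	"unsafe"
)

// anyOverlap and inexactOverlap mirror the vendored functions above.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 32)

	// Identical slices: memory is shared, but element i of x is element i of y,
	// so in-place processing is safe and only AnyOverlap reports true.
	fmt.Println(anyOverlap(buf[:16], buf[:16]), inexactOverlap(buf[:16], buf[:16])) // true false

	// Shifted slices: byte 8 of x is byte 0 of y, so a streaming XOR would read
	// bytes it has already overwritten; both checks report true.
	fmt.Println(anyOverlap(buf[:16], buf[8:24]), inexactOverlap(buf[:16], buf[8:24])) // true true

	// Disjoint halves of the same array share no memory at all.
	fmt.Println(anyOverlap(buf[:16], buf[16:]), inexactOverlap(buf[:16], buf[16:])) // false false
}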
+ +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go new file mode 100644 index 000000000..a0a185f0f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package poly1305 + +import "math/bits" + +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + return bits.Add64(x, y, carry) +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + return bits.Sub64(x, y, borrow) +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + return bits.Mul64(x, y) +} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go new file mode 100644 index 000000000..d118f30ed --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!ppc64le,!s390x gccgo purego + +package poly1305 + +type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go new file mode 100644 index 000000000..9d7a6af09 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from an nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 // import "golang.org/x/crypto/poly1305" + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + m := &MAC{} + initialize(key, &m.macState) + return m +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 000000000..99e5a1d50 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,47 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 000000000..8d394a212 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go new file mode 100644 index 000000000..c942a6590 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -0,0 +1,310 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. 
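The comment at the top of sum_generic.go below states the Poly1305 formula and the sequential h = (h + m)·r mod 2¹³⁰ − 5 loop before diving into 64-bit limb arithmetic. As a cross-check of that description (an illustration only, not vendored code; referenceTag and leToInt are hypothetical helper names), the sketch below evaluates the same formula with math/big — clamping r exactly as the rMask constants do and appending the 0x01 padding byte to each block as updateGeneric does — and compares the result against Sum.

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"golang.org/x/crypto/poly1305"
)

// leToInt interprets b as a little-endian unsigned integer.
func leToInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(be)
}

// referenceTag evaluates h = (h + mᵢ)·r mod 2¹³⁰ − 5 over the blocks, then
// tag = (h + s) mod 2¹²⁸, where each block has a 0x01 byte appended.
func referenceTag(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	r := make([]byte, 16)
	copy(r, key[:16])
	// Clamp r exactly as rMask0/rMask1 do.
	r[3] &= 15
	r[7] &= 15
	r[11] &= 15
	r[15] &= 15
	r[4] &= 252
	r[8] &= 252
	r[12] &= 252
	rInt := leToInt(r)
	sInt := leToInt(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := len(msg)
		if n > 16 {
			n = 16
		}
		block := append(append([]byte{}, msg[:n]...), 1) // the "1 bit above the block"
		h.Add(h, leToInt(block))
		h.Mul(h, rInt)
		h.Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, sInt)

	// Keep the low 16 bytes of h, little-endian (i.e. reduce modulo 2¹²⁸).
	var tag [16]byte
	hb := h.Bytes() // big-endian
	for i := 0; i < 16 && i < len(hb); i++ {
		tag[i] = hb[len(hb)-1-i]
	}
	return tag
}

func main() {
	var key [32]byte
	for i := range key {
		key[i] = byte(3*i + 1)
	}
	msg := []byte("some bytes that span more than one 16-byte block")

	want := referenceTag(msg, &key)
	var got [16]byte
	poly1305.Sum(&got, msg, &key)
	fmt.Println("reference matches Sum:", bytes.Equal(want[:], got[:]))
}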
+ +package poly1305 + +import "encoding/binary" + +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64 bytes message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. + +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m +} + +// macState holds numbers in saturated 64-bit little-endian limbs. That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. 
+ // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +// +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 000000000..2e7a120b1 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,47 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 000000000..4e0281387 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,181 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. 
+ BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 000000000..958fedc07 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,75 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 000000000..0fa9ee6e0 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,503 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
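Whichever backend is selected (the generic code, the amd64/ppc64le assembly, or the s390x vector path above), Write buffers partial blocks so that chunk boundaries never affect the resulting tag. The sketch below (illustration only, not vendored code; key and message values are arbitrary) feeds a message through MAC.Write in irregular chunks and checks the result against the one-shot Sum.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	for i := range key {
		key[i] = byte(i * 7)
	}
	msg := make([]byte, 1000)
	for i := range msg {
		msg[i] = byte(i)
	}

	// One-shot tag.
	var oneShot [16]byte
	poly1305.Sum(&oneShot, msg, &key)

	// Incremental tag, written in deliberately awkward chunk sizes so that the
	// internal 16-byte (TagSize) buffering is exercised on every call.
	mac := poly1305.New(&key)
	for i := 0; i < len(msg); {
		n := 1 + i%13 // 1..13 bytes at a time
		if i+n > len(msg) {
			n = len(msg) - i
		}
		mac.Write(msg[i : i+n])
		i += n
	}
	streamed := mac.Sum(nil)

	fmt.Println("streamed == one-shot:", bytes.Equal(streamed, oneShot[:]))
}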
+ +// +build !gccgo,!purego + +#include "textflag.h" + +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accomodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
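The even/odd lane refactoring described above is only a regrouping of the Horner evaluation, which can be confirmed with exact integer arithmetic. The following sketch (illustration only, not vendored code, with arbitrary stand-in values for r and the four blocks) checks that m₀r⁴ + m₁r³ + m₂r² + m₃r equals (m₀r² + m₂)r² + (m₁r² + m₃)r modulo 2¹³⁰ − 5.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5

	// Arbitrary stand-ins for the clamped key and four message blocks
	// (in the real code each block already includes the 2¹²⁸ padding bit).
	r := big.NewInt(0xfffc0ffffffc)
	m := []*big.Int{
		big.NewInt(1111), big.NewInt(2222), big.NewInt(3333), big.NewInt(4444),
	}

	mod := func(x *big.Int) *big.Int { return new(big.Int).Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }

	r2 := mul(r, r)
	r3 := mul(r2, r)
	r4 := mul(r2, r2)

	// Sequential form: h = m0·r⁴ + m1·r³ + m2·r² + m3·r.
	sequential := add(add(mul(m[0], r4), mul(m[1], r3)), add(mul(m[2], r2), mul(m[3], r)))

	// Two-lane form used by the vector code: lane 0 = (m0·r² + m2)·r²,
	// lane 1 = (m1·r² + m3)·r, summed at the end.
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)
	twoLane := add(lane0, lane1)

	fmt.Println("equal:", sequential.Cmp(twoLane) == 0)
}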
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 +// EX0 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. 
+#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VPERM in0, in1, EX2, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. 
If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] + + MOVD $0, R3 + BR multiply + +skip: + CMPBEQ R3, $0, finish + +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. 
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 000000000..4c96147c8 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. +func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | 
u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 000000000..9bfc0927c --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. 
+func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) 
+ out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 000000000..656e8df94 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 000000000..18085d2e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,883 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. 
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + MOVQ SP,R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(SP) + MOVL R8, 4 (SP) + MOVL AX, 8 (SP) + MOVL R11, 12 (SP) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(SP) + MOVL R8, 20 (SP) + MOVL AX, 24 (SP) + MOVL R11, 28 (SP) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(SP) + MOVL CX, 36 (SP) + MOVL R8, 40 (SP) + MOVL AX, 44 (SP) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(SP) + MOVL CX, 52 (SP) + MOVL R8, 56 (SP) + MOVL AX, 60 (SP) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(SP),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(SP) + MOVOA X2,80(SP) + MOVOA X3,96(SP) + MOVOA X0,112(SP) + MOVOA 0(SP),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(SP) + MOVOA X2,144(SP) + MOVOA X3,160(SP) + MOVOA X0,176(SP) + MOVOA 16(SP),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(SP) + MOVOA X2,208(SP) + MOVOA X0,224(SP) + MOVOA 32(SP),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(SP) + MOVOA X2,256(SP) + MOVOA X0,272(SP) + BYTESATLEAST256: + MOVL 16(SP),DX + MOVL 36 (SP),CX + MOVL DX,288(SP) + MOVL CX,304(SP) + SHLQ $32,CX + ADDQ CX,DX + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (SP) + MOVL CX, 308 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (SP) + MOVL CX, 312 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (SP) + MOVL CX, 316 (SP) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(SP) + MOVL CX, 36 (SP) + MOVQ R9,352(SP) + MOVQ $20,DX + MOVOA 64(SP),X0 + MOVOA 80(SP),X1 + MOVOA 96(SP),X2 + MOVOA 256(SP),X3 + MOVOA 272(SP),X4 + MOVOA 128(SP),X5 + MOVOA 144(SP),X6 + MOVOA 176(SP),X7 + MOVOA 192(SP),X8 + MOVOA 208(SP),X9 + MOVOA 224(SP),X10 + MOVOA 304(SP),X11 + MOVOA 112(SP),X12 + MOVOA 160(SP),X13 + MOVOA 240(SP),X14 + MOVOA 288(SP),X15 + MAINLOOP1: + MOVOA X1,320(SP) + MOVOA X2,336(SP) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(SP),X1 + MOVOA X12,320(SP) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(SP),X2 + MOVOA X0,336(SP) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR 
X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(SP),X0 + MOVOA X1,320(SP) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(SP),X12 + MOVOA X2,336(SP) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(SP),X1 + MOVOA X0,320(SP) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(SP),X2 + MOVOA X12,336(SP) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(SP),X12 + MOVOA 336(SP),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(SP),X12 + PADDL 176(SP),X7 + PADDL 224(SP),X10 + PADDL 272(SP),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(SP),X14 + PADDL 64(SP),X0 + PADDL 128(SP),X5 + PADDL 
192(SP),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(SP),X15 + PADDL 304(SP),X11 + PADDL 80(SP),X1 + PADDL 144(SP),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(SP),X13 + PADDL 208(SP),X9 + PADDL 256(SP),X3 + PADDL 96(SP),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(SP),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(SP),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(SP),DI + LEAQ 360(SP),SI + NOCOPY: 
+ MOVQ R9,352(SP) + MOVOA 48(SP),X0 + MOVOA 0(SP),X1 + MOVOA 16(SP),X2 + MOVOA 32(SP),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(SP),X0 + PADDL 0(SP),X1 + PADDL 16(SP),X2 + PADDL 32(SP),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(SP),R9 + MOVL 16(SP),CX + MOVL 36 (SP),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(SP) + MOVL R8, 36 (SP) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + MOVQ R12,SP + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go 
b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 000000000..8a46bd2b3 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 000000000..68169c6d6 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,231 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + 
x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. 
+func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+    var block [64]byte
+    var counterCopy [16]byte
+    copy(counterCopy[:], counter[:])
+
+    for len(in) >= 64 {
+        core(&block, &counterCopy, key, &Sigma)
+        for i, x := range block {
+            out[i] = in[i] ^ x
+        }
+        u := uint32(1)
+        for i := 8; i < 16; i++ {
+            u += uint32(counterCopy[i])
+            counterCopy[i] = byte(u)
+            u >>= 8
+        }
+        in = in[64:]
+        out = out[64:]
+    }
+
+    if len(in) > 0 {
+        core(&block, &counterCopy, key, &Sigma)
+        for i, v := range in {
+            out[i] = v ^ block[i]
+        }
+    }
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 345b7cd85..2aa859f76 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1694,6 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
         if len(data) > 0 {
             wrote, err := st.body.Write(data)
             if err != nil {
+                sc.sendWindowUpdate(nil, int(f.Length)-wrote)
                 return streamError(id, ErrCodeStreamClosed)
             }
             if wrote != len(data) {
@@ -2020,7 +2021,11 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
     }
     if bodyOpen {
         if vv, ok := rp.header["Content-Length"]; ok {
-            req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+            if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
+                req.ContentLength = int64(cl)
+            } else {
+                req.ContentLength = 0
+            }
         } else {
             req.ContentLength = -1
         }
@@ -2403,9 +2408,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
         var ctype, clen string
         if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
             rws.snapHeader.Del("Content-Length")
-            clen64, err := strconv.ParseInt(clen, 10, 64)
-            if err == nil && clen64 >= 0 {
-                rws.sentContentLen = clen64
+            if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
+                rws.sentContentLen = int64(cl)
             } else {
                 clen = ""
             }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 76a92e0ca..8b129b794 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -154,12 +154,21 @@ func (t *Transport) pingTimeout() time.Duration {
 
 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
 // It returns an error if t1 has already been HTTP/2-enabled.
+//
+// Use ConfigureTransports instead to configure the HTTP/2 Transport.
 func ConfigureTransport(t1 *http.Transport) error {
-    _, err := configureTransport(t1)
+    _, err := ConfigureTransports(t1)
     return err
 }
 
-func configureTransport(t1 *http.Transport) (*Transport, error) {
+// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns a new HTTP/2 Transport for further configuration.
+// It returns an error if t1 has already been HTTP/2-enabled.
+func ConfigureTransports(t1 *http.Transport) (*Transport, error) {
+    return configureTransports(t1)
+}
+
+func configureTransports(t1 *http.Transport) (*Transport, error) {
     connPool := new(clientConnPool)
     t2 := &Transport{
         ConnPool: noDialClientConnPool{connPool},
@@ -689,6 +698,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
     cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
     cc.bw.Flush()
     if cc.werr != nil {
+        cc.Close()
         return nil, cc.werr
     }
 
@@ -1080,6 +1090,15 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
     bodyWriter := cc.t.getBodyWriterState(cs, body)
     cs.on100 = bodyWriter.on100
 
+    defer func() {
+        cc.wmu.Lock()
+        werr := cc.werr
+        cc.wmu.Unlock()
+        if werr != nil {
+            cc.Close()
+        }
+    }()
+
     cc.wmu.Lock()
     endStream := !hasBody && !hasTrailers
     werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
@@ -1129,6 +1148,9 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             // we can keep it.
             bodyWriter.cancel()
             cs.abortRequestBodyWrite(errStopReqBodyWrite)
+            if hasBody && !bodyWritten {
+                <-bodyWriter.resc
+            }
         }
         if re.err != nil {
             cc.forgetStreamID(cs.ID)
@@ -1149,6 +1171,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             } else {
                 bodyWriter.cancel()
                 cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+                <-bodyWriter.resc
             }
             cc.forgetStreamID(cs.ID)
             return nil, cs.getStartedWrite(), errTimeout
@@ -1158,6 +1181,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             } else {
                 bodyWriter.cancel()
                 cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+                <-bodyWriter.resc
             }
             cc.forgetStreamID(cs.ID)
             return nil, cs.getStartedWrite(), ctx.Err()
@@ -1167,6 +1191,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             } else {
                 bodyWriter.cancel()
                 cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+                <-bodyWriter.resc
             }
             cc.forgetStreamID(cs.ID)
             return nil, cs.getStartedWrite(), errRequestCanceled
@@ -1176,6 +1201,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             // forgetStreamID.
             return nil, cs.getStartedWrite(), cs.resetErr
         case err := <-bodyWriter.resc:
+            bodyWritten = true
             // Prefer the read loop's response, if available. Issue 16102.
             select {
             case re := <-readLoopResCh:
@@ -1186,7 +1212,6 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
             cc.forgetStreamID(cs.ID)
             return nil, cs.getStartedWrite(), err
         }
-        bodyWritten = true
         if d := cc.responseHeaderTimeout(); d != 0 {
             timer := time.NewTimer(d)
             defer timer.Stop()
@@ -2006,8 +2031,8 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
     if !streamEnded || isHead {
         res.ContentLength = -1
         if clens := res.Header["Content-Length"]; len(clens) == 1 {
-            if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
-                res.ContentLength = clen64
+            if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+                res.ContentLength = int64(cl)
             } else {
                 // TODO: care? unlike http/1, it won't mess up our framing, so it's
                 // more safe smuggling-wise to ignore.
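Note for reviewers: the Content-Length hunks above (in both server.go and transport.go) swap strconv.ParseInt(s, 10, 64) for strconv.ParseUint(s, 10, 63). The bit size of 63 keeps the parsed value within int64 range, and the unsigned parse rejects negative lengths outright, so the value can always be assigned to the int64 ContentLength fields. A standalone sketch of that behavior, using only the standard library (the literal inputs are illustrative, not taken from this patch):

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // 1<<63-1 is the largest value ParseUint accepts with bitSize 63,
    // so the result always fits into an int64 ContentLength field.
    cl, err := strconv.ParseUint("9223372036854775807", 10, 63)
    fmt.Println(int64(cl), err) // 9223372036854775807 <nil>

    // Negative or overflowing header values now fail the parse instead of
    // sneaking a bogus length through.
    _, err = strconv.ParseUint("-1", 10, 63)
    fmt.Println(err != nil) // true
}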
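The transport.go hunks above also export ConfigureTransports, which behaves like the existing ConfigureTransport but hands back the *http2.Transport so callers can tune it after it is wired into an HTTP/1 transport. A minimal sketch of how downstream code might use the vendored API; the helper name and the timeout values are illustrative assumptions, not part of this patch:

package main

import (
    "net/http"
    "time"

    "golang.org/x/net/http2"
)

// newClient is a hypothetical helper that enables HTTP/2 on an HTTP/1
// transport and then tunes the returned *http2.Transport.
func newClient() (*http.Client, error) {
    t1 := &http.Transport{}
    t2, err := http2.ConfigureTransports(t1) // exported by the hunk above
    if err != nil {
        return nil, err
    }
    // Illustrative settings: getting t2 back is what makes these reachable.
    t2.ReadIdleTimeout = 30 * time.Second
    t2.PingTimeout = 15 * time.Second
    return &http.Client{Transport: t1}, nil
}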
@@ -2525,6 +2550,7 @@ func strSliceContains(ss []string, s string) bool { type erringRoundTripper struct{ err error } +func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } // gzipReader wraps a response body so it can lazily diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.0.0.go similarity index 99% rename from vendor/golang.org/x/net/idna/tables12.00.go rename to vendor/golang.org/x/net/idna/tables12.0.0.go index f4b8ea363..f39f0cb4c 100644 --- a/vendor/golang.org/x/net/idna/tables12.00.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go new file mode 100644 index 000000000..e8c7a36d7 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -0,0 +1,4839 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.16 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +var mappings string = "" + // Size: 8188 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x06令和\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニ" + + "ング\x09インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー" + + "\x09ガロン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0f" + + "キロワット\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル" + + "\x0fサンチーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット" + + "\x09ハイツ\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0c" + + "フィート\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ" + + "\x0cポイント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク" + + "\x0fマンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09" + + "ユアン\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x04" + + "2点\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x02ʍ\x04𤋮\x04𢡊\x04𢡄\x04𣏕" + + "\x04𥉉\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ" + + "\x04יִ\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּ" + + "ׂ\x04אַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04" + + "ךּ\x04כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ" + + "\x04תּ\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ" + + 
"\x02ڤ\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ" + + "\x02ڳ\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ" + + "\x02ۅ\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02" + + "ی\x04ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04" + + "تح\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج" + + "\x04حم\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح" + + "\x04ضخ\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ" + + "\x04فم\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل" + + "\x04كم\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ" + + "\x04مم\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى" + + "\x04هي\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 " + + "ٍّ\x05 َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04ت" + + "ر\x04تز\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04" + + "ين\x04ئخ\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه" + + "\x04شم\x04شه\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي" + + "\x04سى\x04سي\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي" + + "\x04ضى\x04ضي\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06ت" + + "حج\x06تحم\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سج" + + "ح\x06سجى\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم" + + "\x06ضحى\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي" + + "\x06غمى\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح" + + "\x06محج\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم" + + "\x06نحم\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى" + + "\x06تخي\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي" + + "\x06ضحي\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي" + + "\x06كمي\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي" + + "\x06سخي\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08" + + "عليه\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:" + + "\x01!\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\" + + "\x01$\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ" + + "\x02إ\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز" + + "\x02س\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن" + + "\x02ه\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~" + + "\x02¢\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲" + + "\x08𝆹𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η" + + "\x02κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ" + + "\x02ڡ\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029," + + "\x03(a)\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)" + + "\x03(k)\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)" + + "\x03(u)\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03p" + + "pv\x02wc\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ" + + "\x03二\x03多\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終" + + "\x03生\x03販\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指" + + "\x03走\x03打\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔" + + "三〕\x09〔二〕\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03" + + 
"丸\x03乁\x03你\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03" + + "具\x03㒹\x03內\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03" + + "㔕\x03勇\x03勉\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03" + + "灰\x03及\x03叟\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03" + + "啣\x03善\x03喙\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03" + + "埴\x03堍\x03型\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03" + + "姘\x03婦\x03㛮\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03" + + "屮\x03峀\x03岍\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03" + + "㡢\x03㡼\x03庰\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03" + + "忍\x03志\x03忹\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03" + + "憤\x03憯\x03懞\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03" + + "掃\x03揤\x03搢\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03" + + "書\x03晉\x03㬙\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03" + + "朡\x03杞\x03杓\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03" + + "槪\x03檨\x03櫛\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03" + + "汧\x03洖\x03派\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03" + + "淹\x03潮\x03濆\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03" + + "爵\x03牐\x03犀\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03" + + "㼛\x03甤\x03甾\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03" + + "䂖\x03硎\x03碌\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03" + + "築\x03䈧\x03糒\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03" + + "罺\x03羕\x03翺\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03" + + "䑫\x03芑\x03芋\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03" + + "莽\x03菧\x03著\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03" + + "䕫\x03虐\x03虜\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03" + + "蠁\x03䗹\x03衠\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03" + + "豕\x03貫\x03賁\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03" + + "鈸\x03鋗\x03鋘\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03" + + "䩶\x03韠\x03䪲\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03" + + "鳽\x03䳎\x03䳭\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4862 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + + "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + + "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + + "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + + "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + + "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + + 
"\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + + "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + + "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + + "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + + "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + + "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + + "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + + "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + + "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + + "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + + "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + + "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + + "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + + "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + + "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + + "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + + "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + + "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + + "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + + "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + + "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + + "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + + "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" + + "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + + "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + + "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + + "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + + "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + + "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + + "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + + "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + + "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + + "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + + "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + + "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + + "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + + "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + + "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + + "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + + "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + + "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + + "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + + "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." 
+ + "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + + "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + + "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + + "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ + "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + + "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + + "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + + "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + + "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + + "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + + "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + + "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + + "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + + ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + + "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + + "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + + "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + + "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + + "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + + "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + + "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + + "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + + "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + + "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + + "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + + ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + 
"\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + 
"\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 30288 bytes (29.58 KiB). Checksum: c0cd84404a2f6f19. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 126: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 126 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 128 blocks, 8192 entries, 16384 bytes +// The third block is the zero block. +var idnaValues = [8192]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 
0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 
0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 
0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 
0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 
0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08, + 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 
0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 
0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 
0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x3308, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 
0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 
0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 
0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 
0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 
0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 
0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, + 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, + 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, + 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, + 
0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e, + 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, + 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, + 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 
0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5, + 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875, + 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935, + 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5, + 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15, + 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75, + 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95, + 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75, + 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5, + 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55, + 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15, + 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95, + 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5, + 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5, + 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5, + 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5, + 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275, + 0xf1e: 0x3295, 
0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0008, + 0xf3c: 0x0008, 0xf3d: 0x0008, 0xf3e: 0x0008, 0xf3f: 0x0008, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, + 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, + 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, + 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, + 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, + 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, + 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, + 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915, + 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5, + 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95, + 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, + 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, + 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, + 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, + 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, + 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, + 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, + 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd, + 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, + 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, + 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, + 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x3cc9, + // Block 
0x40, offset 0x1000 + 0x1000: 0x3d01, 0x1001: 0x3d69, 0x1002: 0x3dd1, 0x1003: 0x3e39, 0x1004: 0x3e89, 0x1005: 0x3ef1, + 0x1006: 0x3f41, 0x1007: 0x3f91, 0x1008: 0x4011, 0x1009: 0x4079, 0x100a: 0x40c9, 0x100b: 0x4119, + 0x100c: 0x4169, 0x100d: 0x41d1, 0x100e: 0x4239, 0x100f: 0x4289, 0x1010: 0x42d9, 0x1011: 0x4311, + 0x1012: 0x4361, 0x1013: 0x43c9, 0x1014: 0x4431, 0x1015: 0x4469, 0x1016: 0x44e9, 0x1017: 0x4581, + 0x1018: 0x4601, 0x1019: 0x4651, 0x101a: 0x46d1, 0x101b: 0x4751, 0x101c: 0x47b9, 0x101d: 0x4809, + 0x101e: 0x4859, 0x101f: 0x48a9, 0x1020: 0x4911, 0x1021: 0x4991, 0x1022: 0x49f9, 0x1023: 0x4a49, + 0x1024: 0x4a99, 0x1025: 0x4ae9, 0x1026: 0x4b21, 0x1027: 0x4b59, 0x1028: 0x4b91, 0x1029: 0x4bc9, + 0x102a: 0x4c19, 0x102b: 0x4c69, 0x102c: 0x4ce9, 0x102d: 0x4d39, 0x102e: 0x4da1, 0x102f: 0x4e21, + 0x1030: 0x4e71, 0x1031: 0x4ea9, 0x1032: 0x4ee1, 0x1033: 0x4f61, 0x1034: 0x4fc9, 0x1035: 0x5049, + 0x1036: 0x5099, 0x1037: 0x5119, 0x1038: 0x5151, 0x1039: 0x51a1, 0x103a: 0x51f1, 0x103b: 0x5241, + 0x103c: 0x5291, 0x103d: 0x52e1, 0x103e: 0x5349, 0x103f: 0x5399, + // Block 0x41, offset 0x1040 + 0x1040: 0x53d1, 0x1041: 0x5421, 0x1042: 0x5471, 0x1043: 0x54c1, 0x1044: 0x5529, 0x1045: 0x5579, + 0x1046: 0x55c9, 0x1047: 0x5619, 0x1048: 0x5699, 0x1049: 0x5701, 0x104a: 0x5739, 0x104b: 0x57b9, + 0x104c: 0x57f1, 0x104d: 0x5859, 0x104e: 0x58c1, 0x104f: 0x5911, 0x1050: 0x5961, 0x1051: 0x59b1, + 0x1052: 0x5a19, 0x1053: 0x5a51, 0x1054: 0x5aa1, 0x1055: 0x5b09, 0x1056: 0x5b41, 0x1057: 0x5bc1, + 0x1058: 0x5c11, 0x1059: 0x5c39, 0x105a: 0x5c61, 0x105b: 0x5c89, 0x105c: 0x5cb1, 0x105d: 0x5cd9, + 0x105e: 0x5d01, 0x105f: 0x5d29, 0x1060: 0x5d51, 0x1061: 0x5d79, 0x1062: 0x5da1, 0x1063: 0x5dd1, + 0x1064: 0x5e01, 0x1065: 0x5e31, 0x1066: 0x5e61, 0x1067: 0x5e91, 0x1068: 0x5ec1, 0x1069: 0x5ef1, + 0x106a: 0x5f21, 0x106b: 0x5f51, 0x106c: 0x5f81, 0x106d: 0x5fb1, 0x106e: 0x5fe1, 0x106f: 0x6011, + 0x1070: 0x6041, 0x1071: 0x4045, 0x1072: 0x6071, 0x1073: 0x6089, 0x1074: 0x4065, 0x1075: 0x60a1, + 0x1076: 0x60b9, 0x1077: 0x60d1, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60e9, 0x107b: 0x6101, + 0x107c: 0x6139, 0x107d: 0x6171, 0x107e: 0x61a9, 0x107f: 0x61e1, + // Block 0x42, offset 0x1080 + 0x1080: 0x6249, 0x1081: 0x6261, 0x1082: 0x40a5, 0x1083: 0x6279, 0x1084: 0x6291, 0x1085: 0x62a9, + 0x1086: 0x62c1, 0x1087: 0x62d9, 0x1088: 0x40c5, 0x1089: 0x62f1, 0x108a: 0x6319, 0x108b: 0x6331, + 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6349, 0x108f: 0x6361, 0x1090: 0x6379, 0x1091: 0x4105, + 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6391, 0x1097: 0x63a9, + 0x1098: 0x63c1, 0x1099: 0x63d9, 0x109a: 0x63f1, 0x109b: 0x41a5, 0x109c: 0x6409, 0x109d: 0x6421, + 0x109e: 0x6439, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6451, 0x10a2: 0x4205, 0x10a3: 0x4225, + 0x10a4: 0x4245, 0x10a5: 0x6469, 0x10a6: 0x4265, 0x10a7: 0x6481, 0x10a8: 0x64b1, 0x10a9: 0x6249, + 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64e9, 0x10af: 0x6529, + 0x10b0: 0x6571, 0x10b1: 0x6589, 0x10b2: 0x4305, 0x10b3: 0x65a1, 0x10b4: 0x65b9, 0x10b5: 0x65d1, + 0x10b6: 0x4325, 0x10b7: 0x65e9, 0x10b8: 0x6601, 0x10b9: 0x65e9, 0x10ba: 0x6619, 0x10bb: 0x6631, + 0x10bc: 0x4345, 0x10bd: 0x6649, 0x10be: 0x6661, 0x10bf: 0x6649, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6679, 0x10c4: 0x6691, 0x10c5: 0x66a9, + 0x10c6: 0x66c1, 0x10c7: 0x0040, 0x10c8: 0x66f9, 0x10c9: 0x6711, 0x10ca: 0x6729, 0x10cb: 0x6741, + 0x10cc: 0x6759, 0x10cd: 0x6771, 0x10ce: 0x6439, 0x10cf: 0x6789, 0x10d0: 0x67a1, 0x10d1: 0x67b9, + 0x10d2: 
0x43a5, 0x10d3: 0x67d1, 0x10d4: 0x62c1, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67e9, + 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x6801, 0x10db: 0x6819, 0x10dc: 0x6831, 0x10dd: 0x6849, + 0x10de: 0x6861, 0x10df: 0x6891, 0x10e0: 0x68c1, 0x10e1: 0x68e9, 0x10e2: 0x6911, 0x10e3: 0x6939, + 0x10e4: 0x6961, 0x10e5: 0x6989, 0x10e6: 0x69b1, 0x10e7: 0x69d9, 0x10e8: 0x6a01, 0x10e9: 0x6a29, + 0x10ea: 0x6a59, 0x10eb: 0x6a89, 0x10ec: 0x6ab9, 0x10ed: 0x6ae9, 0x10ee: 0x6b19, 0x10ef: 0x6b49, + 0x10f0: 0x6b79, 0x10f1: 0x6ba9, 0x10f2: 0x6bd9, 0x10f3: 0x6c09, 0x10f4: 0x6c39, 0x10f5: 0x6c69, + 0x10f6: 0x6c99, 0x10f7: 0x6cc9, 0x10f8: 0x6cf9, 0x10f9: 0x6d29, 0x10fa: 0x6d59, 0x10fb: 0x6d89, + 0x10fc: 0x6db9, 0x10fd: 0x6de9, 0x10fe: 0x6e19, 0x10ff: 0x4425, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e49, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 
0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e61, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e79, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e91, 0x1231: 0x6ea9, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, + 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, + // Block 0x49, offset 0x1240 + 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, + 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, + 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, + 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, + 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, + 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, + 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, + 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 
0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, + 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, + 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, + 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6f19, 0x1290: 0x6f41, 0x1291: 0x6f69, + 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f91, 0x1296: 0x6fb9, 0x1297: 0x6fe1, + 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x7009, 0x12c1: 0x7021, 0x12c2: 0x7039, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7051, + 0x12c6: 0x7051, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7069, 0x12d4: 0x7091, 0x12d5: 0x70b9, 0x12d6: 0x70e1, 0x12d7: 0x7109, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x7131, + 0x12de: 0x3308, 0x12df: 0x7159, 0x12e0: 0x7181, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7199, + 0x12e4: 0x71b1, 0x12e5: 0x71c9, 0x12e6: 0x71e1, 0x12e7: 0x71f9, 0x12e8: 0x7211, 0x12e9: 0x1fb2, + 0x12ea: 0x7229, 0x12eb: 0x7251, 0x12ec: 0x7279, 0x12ed: 0x72b1, 0x12ee: 0x72e9, 0x12ef: 0x7311, + 0x12f0: 0x7339, 0x12f1: 0x7361, 0x12f2: 0x7389, 0x12f3: 0x73b1, 0x12f4: 0x73d9, 0x12f5: 0x7401, + 0x12f6: 0x7429, 0x12f7: 0x0040, 0x12f8: 0x7451, 0x12f9: 0x7479, 0x12fa: 0x74a1, 0x12fb: 0x74c9, + 0x12fc: 0x74f1, 0x12fd: 0x0040, 0x12fe: 0x7519, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7541, 0x1301: 0x7569, 0x1302: 0x0040, 0x1303: 0x7591, 0x1304: 0x75b9, 0x1305: 0x0040, + 0x1306: 0x75e1, 0x1307: 0x7609, 0x1308: 0x7631, 0x1309: 0x7659, 0x130a: 0x7681, 0x130b: 0x76a9, + 0x130c: 0x76d1, 0x130d: 0x76f9, 0x130e: 0x7721, 0x130f: 0x7749, 0x1310: 0x7771, 0x1311: 0x7771, + 0x1312: 0x7789, 0x1313: 0x7789, 0x1314: 0x7789, 0x1315: 0x7789, 0x1316: 0x77a1, 0x1317: 0x77a1, + 0x1318: 0x77a1, 0x1319: 0x77a1, 0x131a: 0x77b9, 0x131b: 0x77b9, 0x131c: 0x77b9, 0x131d: 0x77b9, + 0x131e: 0x77d1, 0x131f: 0x77d1, 0x1320: 0x77d1, 0x1321: 0x77d1, 0x1322: 0x77e9, 0x1323: 0x77e9, + 0x1324: 0x77e9, 0x1325: 0x77e9, 0x1326: 0x7801, 0x1327: 0x7801, 0x1328: 0x7801, 0x1329: 0x7801, + 0x132a: 0x7819, 0x132b: 0x7819, 0x132c: 0x7819, 0x132d: 0x7819, 0x132e: 0x7831, 0x132f: 0x7831, + 0x1330: 0x7831, 0x1331: 0x7831, 0x1332: 0x7849, 0x1333: 0x7849, 0x1334: 0x7849, 0x1335: 0x7849, + 0x1336: 0x7861, 0x1337: 0x7861, 0x1338: 0x7861, 0x1339: 0x7861, 0x133a: 0x7879, 0x133b: 0x7879, + 0x133c: 0x7879, 0x133d: 0x7879, 0x133e: 0x7891, 0x133f: 0x7891, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7891, 0x1341: 0x7891, 0x1342: 0x78a9, 0x1343: 0x78a9, 0x1344: 0x78c1, 0x1345: 0x78c1, + 0x1346: 0x78d9, 0x1347: 0x78d9, 0x1348: 0x78f1, 0x1349: 0x78f1, 0x134a: 0x7909, 
0x134b: 0x7909, + 0x134c: 0x7921, 0x134d: 0x7921, 0x134e: 0x7939, 0x134f: 0x7939, 0x1350: 0x7939, 0x1351: 0x7939, + 0x1352: 0x7951, 0x1353: 0x7951, 0x1354: 0x7951, 0x1355: 0x7951, 0x1356: 0x7969, 0x1357: 0x7969, + 0x1358: 0x7969, 0x1359: 0x7969, 0x135a: 0x7981, 0x135b: 0x7981, 0x135c: 0x7981, 0x135d: 0x7981, + 0x135e: 0x7999, 0x135f: 0x7999, 0x1360: 0x79b1, 0x1361: 0x79b1, 0x1362: 0x79b1, 0x1363: 0x79b1, + 0x1364: 0x79c9, 0x1365: 0x79c9, 0x1366: 0x79e1, 0x1367: 0x79e1, 0x1368: 0x79e1, 0x1369: 0x79e1, + 0x136a: 0x79f9, 0x136b: 0x79f9, 0x136c: 0x79f9, 0x136d: 0x79f9, 0x136e: 0x7a11, 0x136f: 0x7a11, + 0x1370: 0x7a29, 0x1371: 0x7a29, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x7a41, 0x1394: 0x7a41, 0x1395: 0x7a41, 0x1396: 0x7a41, 0x1397: 0x7a59, + 0x1398: 0x7a59, 0x1399: 0x7a71, 0x139a: 0x7a71, 0x139b: 0x7a89, 0x139c: 0x7a89, 0x139d: 0x0479, + 0x139e: 0x7aa1, 0x139f: 0x7aa1, 0x13a0: 0x7ab9, 0x13a1: 0x7ab9, 0x13a2: 0x7ad1, 0x13a3: 0x7ad1, + 0x13a4: 0x7ae9, 0x13a5: 0x7ae9, 0x13a6: 0x7ae9, 0x13a7: 0x7ae9, 0x13a8: 0x7b01, 0x13a9: 0x7b01, + 0x13aa: 0x7b19, 0x13ab: 0x7b19, 0x13ac: 0x7b41, 0x13ad: 0x7b41, 0x13ae: 0x7b69, 0x13af: 0x7b69, + 0x13b0: 0x7b91, 0x13b1: 0x7b91, 0x13b2: 0x7bb9, 0x13b3: 0x7bb9, 0x13b4: 0x7be1, 0x13b5: 0x7be1, + 0x13b6: 0x7c09, 0x13b7: 0x7c09, 0x13b8: 0x7c09, 0x13b9: 0x7c31, 0x13ba: 0x7c31, 0x13bb: 0x7c31, + 0x13bc: 0x7c59, 0x13bd: 0x7c59, 0x13be: 0x7c59, 0x13bf: 0x7c59, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8649, 0x13c1: 0x8671, 0x13c2: 0x8699, 0x13c3: 0x86c1, 0x13c4: 0x86e9, 0x13c5: 0x8711, + 0x13c6: 0x8739, 0x13c7: 0x8761, 0x13c8: 0x8789, 0x13c9: 0x87b1, 0x13ca: 0x87d9, 0x13cb: 0x8801, + 0x13cc: 0x8829, 0x13cd: 0x8851, 0x13ce: 0x8879, 0x13cf: 0x88a1, 0x13d0: 0x88c9, 0x13d1: 0x88f1, + 0x13d2: 0x8919, 0x13d3: 0x8941, 0x13d4: 0x8969, 0x13d5: 0x8991, 0x13d6: 0x89b9, 0x13d7: 0x89e1, + 0x13d8: 0x8a09, 0x13d9: 0x8a31, 0x13da: 0x8a59, 0x13db: 0x8a81, 0x13dc: 0x8aa9, 0x13dd: 0x8ad1, + 0x13de: 0x8afa, 0x13df: 0x8b2a, 0x13e0: 0x8b5a, 0x13e1: 0x8b8a, 0x13e2: 0x8bba, 0x13e3: 0x8bea, + 0x13e4: 0x8c19, 0x13e5: 0x8c41, 0x13e6: 0x7cc1, 0x13e7: 0x8c69, 0x13e8: 0x7c31, 0x13e9: 0x7ce9, + 0x13ea: 0x8c91, 0x13eb: 0x8cb9, 0x13ec: 0x7d89, 0x13ed: 0x8ce1, 0x13ee: 0x7db1, 0x13ef: 0x7dd9, + 0x13f0: 0x8d09, 0x13f1: 0x8d31, 0x13f2: 0x7e79, 0x13f3: 0x8d59, 0x13f4: 0x7ea1, 0x13f5: 0x7ec9, + 0x13f6: 0x8d81, 0x13f7: 0x8da9, 0x13f8: 0x7f19, 0x13f9: 0x8dd1, 0x13fa: 0x7f41, 0x13fb: 0x7f69, + 0x13fc: 0x83f1, 0x13fd: 0x8419, 0x13fe: 0x8491, 0x13ff: 0x84b9, + // Block 0x50, offset 0x1400 + 0x1400: 0x84e1, 0x1401: 0x8581, 0x1402: 0x85a9, 0x1403: 0x85d1, 0x1404: 0x85f9, 0x1405: 0x8699, + 0x1406: 0x86c1, 0x1407: 0x86e9, 0x1408: 0x8df9, 0x1409: 0x8789, 0x140a: 0x8e21, 0x140b: 0x8e49, + 0x140c: 0x8879, 0x140d: 0x8e71, 0x140e: 0x88a1, 0x140f: 0x88c9, 0x1410: 0x8ad1, 0x1411: 0x8e99, + 0x1412: 0x8ec1, 0x1413: 0x8a09, 0x1414: 0x8ee9, 0x1415: 0x8a31, 0x1416: 0x8a59, 0x1417: 0x7c71, + 0x1418: 0x7c99, 0x1419: 0x8f11, 0x141a: 0x7cc1, 0x141b: 0x8f39, 0x141c: 0x7d11, 0x141d: 0x7d39, + 0x141e: 
0x7d61, 0x141f: 0x7d89, 0x1420: 0x8f61, 0x1421: 0x7e01, 0x1422: 0x7e29, 0x1423: 0x7e51, + 0x1424: 0x7e79, 0x1425: 0x8f89, 0x1426: 0x7f19, 0x1427: 0x7f91, 0x1428: 0x7fb9, 0x1429: 0x7fe1, + 0x142a: 0x8009, 0x142b: 0x8031, 0x142c: 0x8081, 0x142d: 0x80a9, 0x142e: 0x80d1, 0x142f: 0x80f9, + 0x1430: 0x8121, 0x1431: 0x8149, 0x1432: 0x8fb1, 0x1433: 0x8171, 0x1434: 0x8199, 0x1435: 0x81c1, + 0x1436: 0x81e9, 0x1437: 0x8211, 0x1438: 0x8239, 0x1439: 0x8289, 0x143a: 0x82b1, 0x143b: 0x82d9, + 0x143c: 0x8301, 0x143d: 0x8329, 0x143e: 0x8351, 0x143f: 0x8379, + // Block 0x51, offset 0x1440 + 0x1440: 0x83a1, 0x1441: 0x83c9, 0x1442: 0x8441, 0x1443: 0x8469, 0x1444: 0x8509, 0x1445: 0x8531, + 0x1446: 0x8559, 0x1447: 0x8581, 0x1448: 0x85a9, 0x1449: 0x8621, 0x144a: 0x8649, 0x144b: 0x8671, + 0x144c: 0x8699, 0x144d: 0x8fd9, 0x144e: 0x8711, 0x144f: 0x8739, 0x1450: 0x8761, 0x1451: 0x8789, + 0x1452: 0x8801, 0x1453: 0x8829, 0x1454: 0x8851, 0x1455: 0x8879, 0x1456: 0x9001, 0x1457: 0x88f1, + 0x1458: 0x8919, 0x1459: 0x9029, 0x145a: 0x8991, 0x145b: 0x89b9, 0x145c: 0x89e1, 0x145d: 0x8a09, + 0x145e: 0x9051, 0x145f: 0x7cc1, 0x1460: 0x8f39, 0x1461: 0x7d89, 0x1462: 0x8f61, 0x1463: 0x7e79, + 0x1464: 0x8f89, 0x1465: 0x7f19, 0x1466: 0x9079, 0x1467: 0x8121, 0x1468: 0x90a1, 0x1469: 0x90c9, + 0x146a: 0x90f1, 0x146b: 0x8581, 0x146c: 0x85a9, 0x146d: 0x8699, 0x146e: 0x8879, 0x146f: 0x9001, + 0x1470: 0x8a09, 0x1471: 0x9051, 0x1472: 0x9119, 0x1473: 0x9151, 0x1474: 0x9189, 0x1475: 0x91c1, + 0x1476: 0x91e9, 0x1477: 0x9211, 0x1478: 0x9239, 0x1479: 0x9261, 0x147a: 0x9289, 0x147b: 0x92b1, + 0x147c: 0x92d9, 0x147d: 0x9301, 0x147e: 0x9329, 0x147f: 0x9351, + // Block 0x52, offset 0x1480 + 0x1480: 0x9379, 0x1481: 0x93a1, 0x1482: 0x93c9, 0x1483: 0x93f1, 0x1484: 0x9419, 0x1485: 0x9441, + 0x1486: 0x9469, 0x1487: 0x9491, 0x1488: 0x94b9, 0x1489: 0x94e1, 0x148a: 0x9509, 0x148b: 0x9531, + 0x148c: 0x90c9, 0x148d: 0x9559, 0x148e: 0x9581, 0x148f: 0x95a9, 0x1490: 0x95d1, 0x1491: 0x91c1, + 0x1492: 0x91e9, 0x1493: 0x9211, 0x1494: 0x9239, 0x1495: 0x9261, 0x1496: 0x9289, 0x1497: 0x92b1, + 0x1498: 0x92d9, 0x1499: 0x9301, 0x149a: 0x9329, 0x149b: 0x9351, 0x149c: 0x9379, 0x149d: 0x93a1, + 0x149e: 0x93c9, 0x149f: 0x93f1, 0x14a0: 0x9419, 0x14a1: 0x9441, 0x14a2: 0x9469, 0x14a3: 0x9491, + 0x14a4: 0x94b9, 0x14a5: 0x94e1, 0x14a6: 0x9509, 0x14a7: 0x9531, 0x14a8: 0x90c9, 0x14a9: 0x9559, + 0x14aa: 0x9581, 0x14ab: 0x95a9, 0x14ac: 0x95d1, 0x14ad: 0x94e1, 0x14ae: 0x9509, 0x14af: 0x9531, + 0x14b0: 0x90c9, 0x14b1: 0x90a1, 0x14b2: 0x90f1, 0x14b3: 0x8261, 0x14b4: 0x80a9, 0x14b5: 0x80d1, + 0x14b6: 0x80f9, 0x14b7: 0x94e1, 0x14b8: 0x9509, 0x14b9: 0x9531, 0x14ba: 0x8261, 0x14bb: 0x8289, + 0x14bc: 0x95f9, 0x14bd: 0x95f9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x9621, 0x14d1: 0x9659, + 0x14d2: 0x9659, 0x14d3: 0x9691, 0x14d4: 0x96c9, 0x14d5: 0x9701, 0x14d6: 0x9739, 0x14d7: 0x9771, + 0x14d8: 0x97a9, 0x14d9: 0x97a9, 0x14da: 0x97e1, 0x14db: 0x9819, 0x14dc: 0x9851, 0x14dd: 0x9889, + 0x14de: 0x98c1, 0x14df: 0x98f9, 0x14e0: 0x98f9, 0x14e1: 0x9931, 0x14e2: 0x9969, 0x14e3: 0x9969, + 0x14e4: 0x99a1, 0x14e5: 0x99a1, 0x14e6: 0x99d9, 0x14e7: 0x9a11, 0x14e8: 0x9a11, 0x14e9: 0x9a49, + 0x14ea: 0x9a81, 0x14eb: 0x9a81, 0x14ec: 0x9ab9, 0x14ed: 0x9ab9, 0x14ee: 0x9af1, 0x14ef: 0x9b29, + 0x14f0: 0x9b29, 0x14f1: 0x9b61, 
0x14f2: 0x9b61, 0x14f3: 0x9b99, 0x14f4: 0x9bd1, 0x14f5: 0x9c09, + 0x14f6: 0x9c41, 0x14f7: 0x9c41, 0x14f8: 0x9c79, 0x14f9: 0x9cb1, 0x14fa: 0x9ce9, 0x14fb: 0x9d21, + 0x14fc: 0x9d59, 0x14fd: 0x9d59, 0x14fe: 0x9d91, 0x14ff: 0x9dc9, + // Block 0x54, offset 0x1500 + 0x1500: 0xa999, 0x1501: 0xa9d1, 0x1502: 0xaa09, 0x1503: 0xa8f1, 0x1504: 0x9c09, 0x1505: 0x99d9, + 0x1506: 0xaa41, 0x1507: 0xaa79, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaab1, 0x1531: 0xaae9, 0x1532: 0xab21, 0x1533: 0xab69, 0x1534: 0xabb1, 0x1535: 0xabf9, + 0x1536: 0xac41, 0x1537: 0xac89, 0x1538: 0xacd1, 0x1539: 0xad19, 0x153a: 0xad52, 0x153b: 0xae62, + 0x153c: 0xaee1, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaf2a, 0x1551: 0x7d8d, + 0x1552: 0x0040, 0x1553: 0xaf3a, 0x1554: 0x03c2, 0x1555: 0xaf4a, 0x1556: 0xaf5a, 0x1557: 0x7dad, + 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf6a, 0x1574: 0xaf6a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf7a, 0x1578: 0xaf8a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, + 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + // Block 0x56, offset 0x1580 + 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf9a, 0x1588: 0xafaa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, + 0x158c: 0x7fae, 0x158d: 0xaf6a, 0x158e: 0xaf6a, 0x158f: 0xaf6a, 0x1590: 0xaf2a, 0x1591: 0x7fcd, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaf3a, 0x1596: 0xaf5a, 0x1597: 0xaf4a, + 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf7a, 0x159c: 0xaf8a, 0x159d: 0x7ecd, + 0x159e: 0x7f2d, 0x159f: 0xafba, 0x15a0: 0xafca, 0x15a1: 0xafda, 0x15a2: 0x1fb2, 0x15a3: 0xafe9, + 0x15a4: 0xaffa, 0x15a5: 0xb00a, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xb01a, 0x15a9: 0xb02a, + 0x15aa: 0xb03a, 0x15ab: 0xb04a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x800e, 0x15b1: 0xb059, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, + 0x15b6: 0x806e, 0x15b7: 0xb081, 0x15b8: 0x808e, 0x15b9: 0xb0a9, 0x15ba: 0x80ae, 0x15bb: 0xb0d1, + 0x15bc: 0x80ce, 0x15bd: 0xb0f9, 0x15be: 0x80ee, 0x15bf: 0xb121, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb149, 0x15c1: 0xb161, 0x15c2: 0xb161, 0x15c3: 
0xb179, 0x15c4: 0xb179, 0x15c5: 0xb191, + 0x15c6: 0xb191, 0x15c7: 0xb1a9, 0x15c8: 0xb1a9, 0x15c9: 0xb1c1, 0x15ca: 0xb1c1, 0x15cb: 0xb1c1, + 0x15cc: 0xb1c1, 0x15cd: 0xb1d9, 0x15ce: 0xb1d9, 0x15cf: 0xb1f1, 0x15d0: 0xb1f1, 0x15d1: 0xb1f1, + 0x15d2: 0xb1f1, 0x15d3: 0xb209, 0x15d4: 0xb209, 0x15d5: 0xb221, 0x15d6: 0xb221, 0x15d7: 0xb221, + 0x15d8: 0xb221, 0x15d9: 0xb239, 0x15da: 0xb239, 0x15db: 0xb239, 0x15dc: 0xb239, 0x15dd: 0xb251, + 0x15de: 0xb251, 0x15df: 0xb251, 0x15e0: 0xb251, 0x15e1: 0xb269, 0x15e2: 0xb269, 0x15e3: 0xb269, + 0x15e4: 0xb269, 0x15e5: 0xb281, 0x15e6: 0xb281, 0x15e7: 0xb281, 0x15e8: 0xb281, 0x15e9: 0xb299, + 0x15ea: 0xb299, 0x15eb: 0xb2b1, 0x15ec: 0xb2b1, 0x15ed: 0xb2c9, 0x15ee: 0xb2c9, 0x15ef: 0xb2e1, + 0x15f0: 0xb2e1, 0x15f1: 0xb2f9, 0x15f2: 0xb2f9, 0x15f3: 0xb2f9, 0x15f4: 0xb2f9, 0x15f5: 0xb311, + 0x15f6: 0xb311, 0x15f7: 0xb311, 0x15f8: 0xb311, 0x15f9: 0xb329, 0x15fa: 0xb329, 0x15fb: 0xb329, + 0x15fc: 0xb329, 0x15fd: 0xb341, 0x15fe: 0xb341, 0x15ff: 0xb341, + // Block 0x58, offset 0x1600 + 0x1600: 0xb341, 0x1601: 0xb359, 0x1602: 0xb359, 0x1603: 0xb359, 0x1604: 0xb359, 0x1605: 0xb371, + 0x1606: 0xb371, 0x1607: 0xb371, 0x1608: 0xb371, 0x1609: 0xb389, 0x160a: 0xb389, 0x160b: 0xb389, + 0x160c: 0xb389, 0x160d: 0xb3a1, 0x160e: 0xb3a1, 0x160f: 0xb3a1, 0x1610: 0xb3a1, 0x1611: 0xb3b9, + 0x1612: 0xb3b9, 0x1613: 0xb3b9, 0x1614: 0xb3b9, 0x1615: 0xb3d1, 0x1616: 0xb3d1, 0x1617: 0xb3d1, + 0x1618: 0xb3d1, 0x1619: 0xb3e9, 0x161a: 0xb3e9, 0x161b: 0xb3e9, 0x161c: 0xb3e9, 0x161d: 0xb401, + 0x161e: 0xb401, 0x161f: 0xb401, 0x1620: 0xb401, 0x1621: 0xb419, 0x1622: 0xb419, 0x1623: 0xb419, + 0x1624: 0xb419, 0x1625: 0xb431, 0x1626: 0xb431, 0x1627: 0xb431, 0x1628: 0xb431, 0x1629: 0xb449, + 0x162a: 0xb449, 0x162b: 0xb449, 0x162c: 0xb449, 0x162d: 0xb461, 0x162e: 0xb461, 0x162f: 0x7b01, + 0x1630: 0x7b01, 0x1631: 0xb479, 0x1632: 0xb479, 0x1633: 0xb479, 0x1634: 0xb479, 0x1635: 0xb491, + 0x1636: 0xb491, 0x1637: 0xb4b9, 0x1638: 0xb4b9, 0x1639: 0xb4e1, 0x163a: 0xb4e1, 0x163b: 0xb509, + 0x163c: 0xb509, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaf4a, 0x1642: 0xb532, 0x1643: 0xafba, 0x1644: 0xb02a, 0x1645: 0xb03a, + 0x1646: 0xafca, 0x1647: 0xb542, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xafda, 0x164b: 0x1fb2, + 0x164c: 0xaf2a, 0x164d: 0xafe9, 0x164e: 0x29d1, 0x164f: 0xb552, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaf3a, 0x165b: 0x03c2, 0x165c: 0xaffa, 0x165d: 0x1fc2, + 0x165e: 0xb00a, 0x165f: 0xaf5a, 0x1660: 0xb04a, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf9a, + 0x167c: 0xb01a, 0x167d: 0xafaa, 0x167e: 0xb562, 0x167f: 0xaf6a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 
0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf7a, 0x169c: 0xb572, 0x169d: 0xaf8a, + 0x169e: 0xb582, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d, + 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, + 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, + 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, + 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, + 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, + 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, + 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, + 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, + 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, + 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, + 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, + 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, + 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, + 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, + 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, + 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, + 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb591, 0x1721: 0xb5a9, 0x1722: 0xb5c1, 0x1723: 0x8a0e, + 0x1724: 0xb5d9, 0x1725: 0xb5f1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, + 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 
0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 
0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0008, 0x1841: 0x0008, 0x1842: 0x0008, 0x1843: 0x0008, 0x1844: 0x0008, 0x1845: 0x0008, + 0x1846: 0x0008, 0x1847: 0x0040, 0x1848: 0x0040, 0x1849: 0x0008, 0x184a: 0x0040, 0x184b: 0x0040, + 0x184c: 0x0008, 0x184d: 0x0008, 0x184e: 0x0008, 0x184f: 0x0008, 0x1850: 0x0008, 0x1851: 0x0008, + 0x1852: 0x0008, 0x1853: 0x0008, 0x1854: 0x0040, 0x1855: 0x0008, 0x1856: 0x0008, 0x1857: 0x0040, + 0x1858: 0x0008, 0x1859: 0x0008, 0x185a: 0x0008, 0x185b: 0x0008, 0x185c: 0x0008, 0x185d: 0x0008, + 0x185e: 0x0008, 0x185f: 0x0008, 0x1860: 0x0008, 0x1861: 0x0008, 0x1862: 0x0008, 0x1863: 0x0008, + 0x1864: 0x0008, 0x1865: 0x0008, 0x1866: 0x0008, 0x1867: 0x0008, 0x1868: 0x0008, 0x1869: 0x0008, + 0x186a: 0x0008, 0x186b: 0x0008, 0x186c: 0x0008, 0x186d: 0x0008, 0x186e: 0x0008, 0x186f: 0x0008, + 0x1870: 0x3008, 0x1871: 0x3008, 0x1872: 0x3008, 0x1873: 0x3008, 0x1874: 0x3008, 0x1875: 0x3008, + 0x1876: 0x0040, 0x1877: 0x3008, 0x1878: 0x3008, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x3308, + 0x187c: 0x3308, 0x187d: 0x3808, 0x187e: 0x3b08, 0x187f: 0x0008, + // Block 0x62, offset 0x1880 + 0x1880: 0x0039, 0x1881: 0x0ee9, 0x1882: 0x1159, 0x1883: 0x0ef9, 0x1884: 0x0f09, 0x1885: 0x1199, + 0x1886: 0x0f31, 0x1887: 0x0249, 0x1888: 0x0f41, 0x1889: 0x0259, 0x188a: 0x0f51, 0x188b: 0x0359, + 0x188c: 0x0f61, 0x188d: 0x0f71, 0x188e: 0x00d9, 0x188f: 0x0f99, 0x1890: 0x2039, 0x1891: 0x0269, + 0x1892: 0x01d9, 0x1893: 0x0fa9, 0x1894: 0x0fb9, 0x1895: 0x1089, 0x1896: 0x0279, 0x1897: 0x0369, + 0x1898: 0x0289, 0x1899: 0x13d1, 0x189a: 0x0039, 0x189b: 0x0ee9, 0x189c: 0x1159, 0x189d: 0x0ef9, + 0x189e: 0x0f09, 0x189f: 0x1199, 0x18a0: 0x0f31, 0x18a1: 0x0249, 0x18a2: 0x0f41, 0x18a3: 0x0259, + 0x18a4: 0x0f51, 0x18a5: 0x0359, 0x18a6: 0x0f61, 0x18a7: 0x0f71, 0x18a8: 0x00d9, 0x18a9: 0x0f99, + 0x18aa: 0x2039, 0x18ab: 0x0269, 0x18ac: 0x01d9, 0x18ad: 0x0fa9, 0x18ae: 0x0fb9, 0x18af: 0x1089, + 0x18b0: 0x0279, 0x18b1: 0x0369, 0x18b2: 0x0289, 0x18b3: 0x13d1, 0x18b4: 0x0039, 0x18b5: 0x0ee9, + 0x18b6: 0x1159, 0x18b7: 0x0ef9, 0x18b8: 0x0f09, 0x18b9: 0x1199, 0x18ba: 0x0f31, 0x18bb: 0x0249, + 0x18bc: 0x0f41, 0x18bd: 0x0259, 0x18be: 0x0f51, 0x18bf: 0x0359, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f61, 0x18c1: 0x0f71, 0x18c2: 0x00d9, 0x18c3: 0x0f99, 0x18c4: 0x2039, 0x18c5: 0x0269, + 0x18c6: 0x01d9, 0x18c7: 0x0fa9, 0x18c8: 0x0fb9, 0x18c9: 0x1089, 0x18ca: 0x0279, 0x18cb: 0x0369, + 0x18cc: 0x0289, 0x18cd: 0x13d1, 0x18ce: 0x0039, 0x18cf: 0x0ee9, 0x18d0: 0x1159, 0x18d1: 0x0ef9, + 0x18d2: 0x0f09, 0x18d3: 0x1199, 0x18d4: 0x0f31, 0x18d5: 0x0040, 0x18d6: 0x0f41, 0x18d7: 0x0259, + 0x18d8: 0x0f51, 0x18d9: 0x0359, 0x18da: 0x0f61, 0x18db: 0x0f71, 0x18dc: 0x00d9, 0x18dd: 0x0f99, + 0x18de: 0x2039, 0x18df: 0x0269, 0x18e0: 0x01d9, 0x18e1: 0x0fa9, 0x18e2: 0x0fb9, 0x18e3: 0x1089, + 0x18e4: 0x0279, 0x18e5: 0x0369, 0x18e6: 0x0289, 0x18e7: 0x13d1, 0x18e8: 0x0039, 0x18e9: 0x0ee9, + 0x18ea: 0x1159, 0x18eb: 0x0ef9, 0x18ec: 0x0f09, 0x18ed: 0x1199, 0x18ee: 0x0f31, 0x18ef: 0x0249, + 0x18f0: 0x0f41, 0x18f1: 0x0259, 0x18f2: 0x0f51, 0x18f3: 0x0359, 0x18f4: 0x0f61, 0x18f5: 0x0f71, + 0x18f6: 0x00d9, 0x18f7: 0x0f99, 0x18f8: 0x2039, 0x18f9: 0x0269, 0x18fa: 0x01d9, 0x18fb: 0x0fa9, + 0x18fc: 0x0fb9, 0x18fd: 0x1089, 0x18fe: 0x0279, 0x18ff: 0x0369, + // Block 0x64, offset 0x1900 + 0x1900: 0x0289, 0x1901: 0x13d1, 0x1902: 0x0039, 0x1903: 0x0ee9, 0x1904: 0x1159, 0x1905: 0x0ef9, + 0x1906: 0x0f09, 0x1907: 0x1199, 0x1908: 0x0f31, 0x1909: 0x0249, 0x190a: 0x0f41, 0x190b: 0x0259, + 0x190c: 0x0f51, 0x190d: 0x0359, 0x190e: 0x0f61, 0x190f: 
0x0f71, 0x1910: 0x00d9, 0x1911: 0x0f99, + 0x1912: 0x2039, 0x1913: 0x0269, 0x1914: 0x01d9, 0x1915: 0x0fa9, 0x1916: 0x0fb9, 0x1917: 0x1089, + 0x1918: 0x0279, 0x1919: 0x0369, 0x191a: 0x0289, 0x191b: 0x13d1, 0x191c: 0x0039, 0x191d: 0x0040, + 0x191e: 0x1159, 0x191f: 0x0ef9, 0x1920: 0x0040, 0x1921: 0x0040, 0x1922: 0x0f31, 0x1923: 0x0040, + 0x1924: 0x0040, 0x1925: 0x0259, 0x1926: 0x0f51, 0x1927: 0x0040, 0x1928: 0x0040, 0x1929: 0x0f71, + 0x192a: 0x00d9, 0x192b: 0x0f99, 0x192c: 0x2039, 0x192d: 0x0040, 0x192e: 0x01d9, 0x192f: 0x0fa9, + 0x1930: 0x0fb9, 0x1931: 0x1089, 0x1932: 0x0279, 0x1933: 0x0369, 0x1934: 0x0289, 0x1935: 0x13d1, + 0x1936: 0x0039, 0x1937: 0x0ee9, 0x1938: 0x1159, 0x1939: 0x0ef9, 0x193a: 0x0040, 0x193b: 0x1199, + 0x193c: 0x0040, 0x193d: 0x0249, 0x193e: 0x0f41, 0x193f: 0x0259, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f51, 0x1941: 0x0359, 0x1942: 0x0f61, 0x1943: 0x0f71, 0x1944: 0x0040, 0x1945: 0x0f99, + 0x1946: 0x2039, 0x1947: 0x0269, 0x1948: 0x01d9, 0x1949: 0x0fa9, 0x194a: 0x0fb9, 0x194b: 0x1089, + 0x194c: 0x0279, 0x194d: 0x0369, 0x194e: 0x0289, 0x194f: 0x13d1, 0x1950: 0x0039, 0x1951: 0x0ee9, + 0x1952: 0x1159, 0x1953: 0x0ef9, 0x1954: 0x0f09, 0x1955: 0x1199, 0x1956: 0x0f31, 0x1957: 0x0249, + 0x1958: 0x0f41, 0x1959: 0x0259, 0x195a: 0x0f51, 0x195b: 0x0359, 0x195c: 0x0f61, 0x195d: 0x0f71, + 0x195e: 0x00d9, 0x195f: 0x0f99, 0x1960: 0x2039, 0x1961: 0x0269, 0x1962: 0x01d9, 0x1963: 0x0fa9, + 0x1964: 0x0fb9, 0x1965: 0x1089, 0x1966: 0x0279, 0x1967: 0x0369, 0x1968: 0x0289, 0x1969: 0x13d1, + 0x196a: 0x0039, 0x196b: 0x0ee9, 0x196c: 0x1159, 0x196d: 0x0ef9, 0x196e: 0x0f09, 0x196f: 0x1199, + 0x1970: 0x0f31, 0x1971: 0x0249, 0x1972: 0x0f41, 0x1973: 0x0259, 0x1974: 0x0f51, 0x1975: 0x0359, + 0x1976: 0x0f61, 0x1977: 0x0f71, 0x1978: 0x00d9, 0x1979: 0x0f99, 0x197a: 0x2039, 0x197b: 0x0269, + 0x197c: 0x01d9, 0x197d: 0x0fa9, 0x197e: 0x0fb9, 0x197f: 0x1089, + // Block 0x66, offset 0x1980 + 0x1980: 0x0279, 0x1981: 0x0369, 0x1982: 0x0289, 0x1983: 0x13d1, 0x1984: 0x0039, 0x1985: 0x0ee9, + 0x1986: 0x0040, 0x1987: 0x0ef9, 0x1988: 0x0f09, 0x1989: 0x1199, 0x198a: 0x0f31, 0x198b: 0x0040, + 0x198c: 0x0040, 0x198d: 0x0259, 0x198e: 0x0f51, 0x198f: 0x0359, 0x1990: 0x0f61, 0x1991: 0x0f71, + 0x1992: 0x00d9, 0x1993: 0x0f99, 0x1994: 0x2039, 0x1995: 0x0040, 0x1996: 0x01d9, 0x1997: 0x0fa9, + 0x1998: 0x0fb9, 0x1999: 0x1089, 0x199a: 0x0279, 0x199b: 0x0369, 0x199c: 0x0289, 0x199d: 0x0040, + 0x199e: 0x0039, 0x199f: 0x0ee9, 0x19a0: 0x1159, 0x19a1: 0x0ef9, 0x19a2: 0x0f09, 0x19a3: 0x1199, + 0x19a4: 0x0f31, 0x19a5: 0x0249, 0x19a6: 0x0f41, 0x19a7: 0x0259, 0x19a8: 0x0f51, 0x19a9: 0x0359, + 0x19aa: 0x0f61, 0x19ab: 0x0f71, 0x19ac: 0x00d9, 0x19ad: 0x0f99, 0x19ae: 0x2039, 0x19af: 0x0269, + 0x19b0: 0x01d9, 0x19b1: 0x0fa9, 0x19b2: 0x0fb9, 0x19b3: 0x1089, 0x19b4: 0x0279, 0x19b5: 0x0369, + 0x19b6: 0x0289, 0x19b7: 0x13d1, 0x19b8: 0x0039, 0x19b9: 0x0ee9, 0x19ba: 0x0040, 0x19bb: 0x0ef9, + 0x19bc: 0x0f09, 0x19bd: 0x1199, 0x19be: 0x0f31, 0x19bf: 0x0040, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f41, 0x19c1: 0x0259, 0x19c2: 0x0f51, 0x19c3: 0x0359, 0x19c4: 0x0f61, 0x19c5: 0x0040, + 0x19c6: 0x00d9, 0x19c7: 0x0040, 0x19c8: 0x0040, 0x19c9: 0x0040, 0x19ca: 0x01d9, 0x19cb: 0x0fa9, + 0x19cc: 0x0fb9, 0x19cd: 0x1089, 0x19ce: 0x0279, 0x19cf: 0x0369, 0x19d0: 0x0289, 0x19d1: 0x0040, + 0x19d2: 0x0039, 0x19d3: 0x0ee9, 0x19d4: 0x1159, 0x19d5: 0x0ef9, 0x19d6: 0x0f09, 0x19d7: 0x1199, + 0x19d8: 0x0f31, 0x19d9: 0x0249, 0x19da: 0x0f41, 0x19db: 0x0259, 0x19dc: 0x0f51, 0x19dd: 0x0359, + 0x19de: 0x0f61, 0x19df: 0x0f71, 0x19e0: 0x00d9, 0x19e1: 0x0f99, 0x19e2: 0x2039, 
0x19e3: 0x0269, + 0x19e4: 0x01d9, 0x19e5: 0x0fa9, 0x19e6: 0x0fb9, 0x19e7: 0x1089, 0x19e8: 0x0279, 0x19e9: 0x0369, + 0x19ea: 0x0289, 0x19eb: 0x13d1, 0x19ec: 0x0039, 0x19ed: 0x0ee9, 0x19ee: 0x1159, 0x19ef: 0x0ef9, + 0x19f0: 0x0f09, 0x19f1: 0x1199, 0x19f2: 0x0f31, 0x19f3: 0x0249, 0x19f4: 0x0f41, 0x19f5: 0x0259, + 0x19f6: 0x0f51, 0x19f7: 0x0359, 0x19f8: 0x0f61, 0x19f9: 0x0f71, 0x19fa: 0x00d9, 0x19fb: 0x0f99, + 0x19fc: 0x2039, 0x19fd: 0x0269, 0x19fe: 0x01d9, 0x19ff: 0x0fa9, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0fb9, 0x1a01: 0x1089, 0x1a02: 0x0279, 0x1a03: 0x0369, 0x1a04: 0x0289, 0x1a05: 0x13d1, + 0x1a06: 0x0039, 0x1a07: 0x0ee9, 0x1a08: 0x1159, 0x1a09: 0x0ef9, 0x1a0a: 0x0f09, 0x1a0b: 0x1199, + 0x1a0c: 0x0f31, 0x1a0d: 0x0249, 0x1a0e: 0x0f41, 0x1a0f: 0x0259, 0x1a10: 0x0f51, 0x1a11: 0x0359, + 0x1a12: 0x0f61, 0x1a13: 0x0f71, 0x1a14: 0x00d9, 0x1a15: 0x0f99, 0x1a16: 0x2039, 0x1a17: 0x0269, + 0x1a18: 0x01d9, 0x1a19: 0x0fa9, 0x1a1a: 0x0fb9, 0x1a1b: 0x1089, 0x1a1c: 0x0279, 0x1a1d: 0x0369, + 0x1a1e: 0x0289, 0x1a1f: 0x13d1, 0x1a20: 0x0039, 0x1a21: 0x0ee9, 0x1a22: 0x1159, 0x1a23: 0x0ef9, + 0x1a24: 0x0f09, 0x1a25: 0x1199, 0x1a26: 0x0f31, 0x1a27: 0x0249, 0x1a28: 0x0f41, 0x1a29: 0x0259, + 0x1a2a: 0x0f51, 0x1a2b: 0x0359, 0x1a2c: 0x0f61, 0x1a2d: 0x0f71, 0x1a2e: 0x00d9, 0x1a2f: 0x0f99, + 0x1a30: 0x2039, 0x1a31: 0x0269, 0x1a32: 0x01d9, 0x1a33: 0x0fa9, 0x1a34: 0x0fb9, 0x1a35: 0x1089, + 0x1a36: 0x0279, 0x1a37: 0x0369, 0x1a38: 0x0289, 0x1a39: 0x13d1, 0x1a3a: 0x0039, 0x1a3b: 0x0ee9, + 0x1a3c: 0x1159, 0x1a3d: 0x0ef9, 0x1a3e: 0x0f09, 0x1a3f: 0x1199, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f31, 0x1a41: 0x0249, 0x1a42: 0x0f41, 0x1a43: 0x0259, 0x1a44: 0x0f51, 0x1a45: 0x0359, + 0x1a46: 0x0f61, 0x1a47: 0x0f71, 0x1a48: 0x00d9, 0x1a49: 0x0f99, 0x1a4a: 0x2039, 0x1a4b: 0x0269, + 0x1a4c: 0x01d9, 0x1a4d: 0x0fa9, 0x1a4e: 0x0fb9, 0x1a4f: 0x1089, 0x1a50: 0x0279, 0x1a51: 0x0369, + 0x1a52: 0x0289, 0x1a53: 0x13d1, 0x1a54: 0x0039, 0x1a55: 0x0ee9, 0x1a56: 0x1159, 0x1a57: 0x0ef9, + 0x1a58: 0x0f09, 0x1a59: 0x1199, 0x1a5a: 0x0f31, 0x1a5b: 0x0249, 0x1a5c: 0x0f41, 0x1a5d: 0x0259, + 0x1a5e: 0x0f51, 0x1a5f: 0x0359, 0x1a60: 0x0f61, 0x1a61: 0x0f71, 0x1a62: 0x00d9, 0x1a63: 0x0f99, + 0x1a64: 0x2039, 0x1a65: 0x0269, 0x1a66: 0x01d9, 0x1a67: 0x0fa9, 0x1a68: 0x0fb9, 0x1a69: 0x1089, + 0x1a6a: 0x0279, 0x1a6b: 0x0369, 0x1a6c: 0x0289, 0x1a6d: 0x13d1, 0x1a6e: 0x0039, 0x1a6f: 0x0ee9, + 0x1a70: 0x1159, 0x1a71: 0x0ef9, 0x1a72: 0x0f09, 0x1a73: 0x1199, 0x1a74: 0x0f31, 0x1a75: 0x0249, + 0x1a76: 0x0f41, 0x1a77: 0x0259, 0x1a78: 0x0f51, 0x1a79: 0x0359, 0x1a7a: 0x0f61, 0x1a7b: 0x0f71, + 0x1a7c: 0x00d9, 0x1a7d: 0x0f99, 0x1a7e: 0x2039, 0x1a7f: 0x0269, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x01d9, 0x1a81: 0x0fa9, 0x1a82: 0x0fb9, 0x1a83: 0x1089, 0x1a84: 0x0279, 0x1a85: 0x0369, + 0x1a86: 0x0289, 0x1a87: 0x13d1, 0x1a88: 0x0039, 0x1a89: 0x0ee9, 0x1a8a: 0x1159, 0x1a8b: 0x0ef9, + 0x1a8c: 0x0f09, 0x1a8d: 0x1199, 0x1a8e: 0x0f31, 0x1a8f: 0x0249, 0x1a90: 0x0f41, 0x1a91: 0x0259, + 0x1a92: 0x0f51, 0x1a93: 0x0359, 0x1a94: 0x0f61, 0x1a95: 0x0f71, 0x1a96: 0x00d9, 0x1a97: 0x0f99, + 0x1a98: 0x2039, 0x1a99: 0x0269, 0x1a9a: 0x01d9, 0x1a9b: 0x0fa9, 0x1a9c: 0x0fb9, 0x1a9d: 0x1089, + 0x1a9e: 0x0279, 0x1a9f: 0x0369, 0x1aa0: 0x0289, 0x1aa1: 0x13d1, 0x1aa2: 0x0039, 0x1aa3: 0x0ee9, + 0x1aa4: 0x1159, 0x1aa5: 0x0ef9, 0x1aa6: 0x0f09, 0x1aa7: 0x1199, 0x1aa8: 0x0f31, 0x1aa9: 0x0249, + 0x1aaa: 0x0f41, 0x1aab: 0x0259, 0x1aac: 0x0f51, 0x1aad: 0x0359, 0x1aae: 0x0f61, 0x1aaf: 0x0f71, + 0x1ab0: 0x00d9, 0x1ab1: 0x0f99, 0x1ab2: 0x2039, 0x1ab3: 0x0269, 0x1ab4: 0x01d9, 0x1ab5: 0x0fa9, + 0x1ab6: 
0x0fb9, 0x1ab7: 0x1089, 0x1ab8: 0x0279, 0x1ab9: 0x0369, 0x1aba: 0x0289, 0x1abb: 0x13d1, + 0x1abc: 0x0039, 0x1abd: 0x0ee9, 0x1abe: 0x1159, 0x1abf: 0x0ef9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0f09, 0x1ac1: 0x1199, 0x1ac2: 0x0f31, 0x1ac3: 0x0249, 0x1ac4: 0x0f41, 0x1ac5: 0x0259, + 0x1ac6: 0x0f51, 0x1ac7: 0x0359, 0x1ac8: 0x0f61, 0x1ac9: 0x0f71, 0x1aca: 0x00d9, 0x1acb: 0x0f99, + 0x1acc: 0x2039, 0x1acd: 0x0269, 0x1ace: 0x01d9, 0x1acf: 0x0fa9, 0x1ad0: 0x0fb9, 0x1ad1: 0x1089, + 0x1ad2: 0x0279, 0x1ad3: 0x0369, 0x1ad4: 0x0289, 0x1ad5: 0x13d1, 0x1ad6: 0x0039, 0x1ad7: 0x0ee9, + 0x1ad8: 0x1159, 0x1ad9: 0x0ef9, 0x1ada: 0x0f09, 0x1adb: 0x1199, 0x1adc: 0x0f31, 0x1add: 0x0249, + 0x1ade: 0x0f41, 0x1adf: 0x0259, 0x1ae0: 0x0f51, 0x1ae1: 0x0359, 0x1ae2: 0x0f61, 0x1ae3: 0x0f71, + 0x1ae4: 0x00d9, 0x1ae5: 0x0f99, 0x1ae6: 0x2039, 0x1ae7: 0x0269, 0x1ae8: 0x01d9, 0x1ae9: 0x0fa9, + 0x1aea: 0x0fb9, 0x1aeb: 0x1089, 0x1aec: 0x0279, 0x1aed: 0x0369, 0x1aee: 0x0289, 0x1aef: 0x13d1, + 0x1af0: 0x0039, 0x1af1: 0x0ee9, 0x1af2: 0x1159, 0x1af3: 0x0ef9, 0x1af4: 0x0f09, 0x1af5: 0x1199, + 0x1af6: 0x0f31, 0x1af7: 0x0249, 0x1af8: 0x0f41, 0x1af9: 0x0259, 0x1afa: 0x0f51, 0x1afb: 0x0359, + 0x1afc: 0x0f61, 0x1afd: 0x0f71, 0x1afe: 0x00d9, 0x1aff: 0x0f99, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2039, 0x1b01: 0x0269, 0x1b02: 0x01d9, 0x1b03: 0x0fa9, 0x1b04: 0x0fb9, 0x1b05: 0x1089, + 0x1b06: 0x0279, 0x1b07: 0x0369, 0x1b08: 0x0289, 0x1b09: 0x13d1, 0x1b0a: 0x0039, 0x1b0b: 0x0ee9, + 0x1b0c: 0x1159, 0x1b0d: 0x0ef9, 0x1b0e: 0x0f09, 0x1b0f: 0x1199, 0x1b10: 0x0f31, 0x1b11: 0x0249, + 0x1b12: 0x0f41, 0x1b13: 0x0259, 0x1b14: 0x0f51, 0x1b15: 0x0359, 0x1b16: 0x0f61, 0x1b17: 0x0f71, + 0x1b18: 0x00d9, 0x1b19: 0x0f99, 0x1b1a: 0x2039, 0x1b1b: 0x0269, 0x1b1c: 0x01d9, 0x1b1d: 0x0fa9, + 0x1b1e: 0x0fb9, 0x1b1f: 0x1089, 0x1b20: 0x0279, 0x1b21: 0x0369, 0x1b22: 0x0289, 0x1b23: 0x13d1, + 0x1b24: 0xbad1, 0x1b25: 0xbae9, 0x1b26: 0x0040, 0x1b27: 0x0040, 0x1b28: 0xbb01, 0x1b29: 0x1099, + 0x1b2a: 0x10b1, 0x1b2b: 0x10c9, 0x1b2c: 0xbb19, 0x1b2d: 0xbb31, 0x1b2e: 0xbb49, 0x1b2f: 0x1429, + 0x1b30: 0x1a31, 0x1b31: 0xbb61, 0x1b32: 0xbb79, 0x1b33: 0xbb91, 0x1b34: 0xbba9, 0x1b35: 0xbbc1, + 0x1b36: 0xbbd9, 0x1b37: 0x2109, 0x1b38: 0x1111, 0x1b39: 0x1429, 0x1b3a: 0xbbf1, 0x1b3b: 0xbc09, + 0x1b3c: 0xbc21, 0x1b3d: 0x10e1, 0x1b3e: 0x10f9, 0x1b3f: 0xbc39, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x2079, 0x1b41: 0xbc51, 0x1b42: 0xbb01, 0x1b43: 0x1099, 0x1b44: 0x10b1, 0x1b45: 0x10c9, + 0x1b46: 0xbb19, 0x1b47: 0xbb31, 0x1b48: 0xbb49, 0x1b49: 0x1429, 0x1b4a: 0x1a31, 0x1b4b: 0xbb61, + 0x1b4c: 0xbb79, 0x1b4d: 0xbb91, 0x1b4e: 0xbba9, 0x1b4f: 0xbbc1, 0x1b50: 0xbbd9, 0x1b51: 0x2109, + 0x1b52: 0x1111, 0x1b53: 0xbbf1, 0x1b54: 0xbbf1, 0x1b55: 0xbc09, 0x1b56: 0xbc21, 0x1b57: 0x10e1, + 0x1b58: 0x10f9, 0x1b59: 0xbc39, 0x1b5a: 0x2079, 0x1b5b: 0xbc71, 0x1b5c: 0xbb19, 0x1b5d: 0x1429, + 0x1b5e: 0xbb61, 0x1b5f: 0x10e1, 0x1b60: 0x1111, 0x1b61: 0x2109, 0x1b62: 0xbb01, 0x1b63: 0x1099, + 0x1b64: 0x10b1, 0x1b65: 0x10c9, 0x1b66: 0xbb19, 0x1b67: 0xbb31, 0x1b68: 0xbb49, 0x1b69: 0x1429, + 0x1b6a: 0x1a31, 0x1b6b: 0xbb61, 0x1b6c: 0xbb79, 0x1b6d: 0xbb91, 0x1b6e: 0xbba9, 0x1b6f: 0xbbc1, + 0x1b70: 0xbbd9, 0x1b71: 0x2109, 0x1b72: 0x1111, 0x1b73: 0x1429, 0x1b74: 0xbbf1, 0x1b75: 0xbc09, + 0x1b76: 0xbc21, 0x1b77: 0x10e1, 0x1b78: 0x10f9, 0x1b79: 0xbc39, 0x1b7a: 0x2079, 0x1b7b: 0xbc51, + 0x1b7c: 0xbb01, 0x1b7d: 0x1099, 0x1b7e: 0x10b1, 0x1b7f: 0x10c9, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb19, 0x1b81: 0xbb31, 0x1b82: 0xbb49, 0x1b83: 0x1429, 0x1b84: 0x1a31, 0x1b85: 0xbb61, + 0x1b86: 0xbb79, 0x1b87: 0xbb91, 
0x1b88: 0xbba9, 0x1b89: 0xbbc1, 0x1b8a: 0xbbd9, 0x1b8b: 0x2109, + 0x1b8c: 0x1111, 0x1b8d: 0xbbf1, 0x1b8e: 0xbbf1, 0x1b8f: 0xbc09, 0x1b90: 0xbc21, 0x1b91: 0x10e1, + 0x1b92: 0x10f9, 0x1b93: 0xbc39, 0x1b94: 0x2079, 0x1b95: 0xbc71, 0x1b96: 0xbb19, 0x1b97: 0x1429, + 0x1b98: 0xbb61, 0x1b99: 0x10e1, 0x1b9a: 0x1111, 0x1b9b: 0x2109, 0x1b9c: 0xbb01, 0x1b9d: 0x1099, + 0x1b9e: 0x10b1, 0x1b9f: 0x10c9, 0x1ba0: 0xbb19, 0x1ba1: 0xbb31, 0x1ba2: 0xbb49, 0x1ba3: 0x1429, + 0x1ba4: 0x1a31, 0x1ba5: 0xbb61, 0x1ba6: 0xbb79, 0x1ba7: 0xbb91, 0x1ba8: 0xbba9, 0x1ba9: 0xbbc1, + 0x1baa: 0xbbd9, 0x1bab: 0x2109, 0x1bac: 0x1111, 0x1bad: 0x1429, 0x1bae: 0xbbf1, 0x1baf: 0xbc09, + 0x1bb0: 0xbc21, 0x1bb1: 0x10e1, 0x1bb2: 0x10f9, 0x1bb3: 0xbc39, 0x1bb4: 0x2079, 0x1bb5: 0xbc51, + 0x1bb6: 0xbb01, 0x1bb7: 0x1099, 0x1bb8: 0x10b1, 0x1bb9: 0x10c9, 0x1bba: 0xbb19, 0x1bbb: 0xbb31, + 0x1bbc: 0xbb49, 0x1bbd: 0x1429, 0x1bbe: 0x1a31, 0x1bbf: 0xbb61, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0xbb79, 0x1bc1: 0xbb91, 0x1bc2: 0xbba9, 0x1bc3: 0xbbc1, 0x1bc4: 0xbbd9, 0x1bc5: 0x2109, + 0x1bc6: 0x1111, 0x1bc7: 0xbbf1, 0x1bc8: 0xbbf1, 0x1bc9: 0xbc09, 0x1bca: 0xbc21, 0x1bcb: 0x10e1, + 0x1bcc: 0x10f9, 0x1bcd: 0xbc39, 0x1bce: 0x2079, 0x1bcf: 0xbc71, 0x1bd0: 0xbb19, 0x1bd1: 0x1429, + 0x1bd2: 0xbb61, 0x1bd3: 0x10e1, 0x1bd4: 0x1111, 0x1bd5: 0x2109, 0x1bd6: 0xbb01, 0x1bd7: 0x1099, + 0x1bd8: 0x10b1, 0x1bd9: 0x10c9, 0x1bda: 0xbb19, 0x1bdb: 0xbb31, 0x1bdc: 0xbb49, 0x1bdd: 0x1429, + 0x1bde: 0x1a31, 0x1bdf: 0xbb61, 0x1be0: 0xbb79, 0x1be1: 0xbb91, 0x1be2: 0xbba9, 0x1be3: 0xbbc1, + 0x1be4: 0xbbd9, 0x1be5: 0x2109, 0x1be6: 0x1111, 0x1be7: 0x1429, 0x1be8: 0xbbf1, 0x1be9: 0xbc09, + 0x1bea: 0xbc21, 0x1beb: 0x10e1, 0x1bec: 0x10f9, 0x1bed: 0xbc39, 0x1bee: 0x2079, 0x1bef: 0xbc51, + 0x1bf0: 0xbb01, 0x1bf1: 0x1099, 0x1bf2: 0x10b1, 0x1bf3: 0x10c9, 0x1bf4: 0xbb19, 0x1bf5: 0xbb31, + 0x1bf6: 0xbb49, 0x1bf7: 0x1429, 0x1bf8: 0x1a31, 0x1bf9: 0xbb61, 0x1bfa: 0xbb79, 0x1bfb: 0xbb91, + 0x1bfc: 0xbba9, 0x1bfd: 0xbbc1, 0x1bfe: 0xbbd9, 0x1bff: 0x2109, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x1111, 0x1c01: 0xbbf1, 0x1c02: 0xbbf1, 0x1c03: 0xbc09, 0x1c04: 0xbc21, 0x1c05: 0x10e1, + 0x1c06: 0x10f9, 0x1c07: 0xbc39, 0x1c08: 0x2079, 0x1c09: 0xbc71, 0x1c0a: 0xbb19, 0x1c0b: 0x1429, + 0x1c0c: 0xbb61, 0x1c0d: 0x10e1, 0x1c0e: 0x1111, 0x1c0f: 0x2109, 0x1c10: 0xbb01, 0x1c11: 0x1099, + 0x1c12: 0x10b1, 0x1c13: 0x10c9, 0x1c14: 0xbb19, 0x1c15: 0xbb31, 0x1c16: 0xbb49, 0x1c17: 0x1429, + 0x1c18: 0x1a31, 0x1c19: 0xbb61, 0x1c1a: 0xbb79, 0x1c1b: 0xbb91, 0x1c1c: 0xbba9, 0x1c1d: 0xbbc1, + 0x1c1e: 0xbbd9, 0x1c1f: 0x2109, 0x1c20: 0x1111, 0x1c21: 0x1429, 0x1c22: 0xbbf1, 0x1c23: 0xbc09, + 0x1c24: 0xbc21, 0x1c25: 0x10e1, 0x1c26: 0x10f9, 0x1c27: 0xbc39, 0x1c28: 0x2079, 0x1c29: 0xbc51, + 0x1c2a: 0xbb01, 0x1c2b: 0x1099, 0x1c2c: 0x10b1, 0x1c2d: 0x10c9, 0x1c2e: 0xbb19, 0x1c2f: 0xbb31, + 0x1c30: 0xbb49, 0x1c31: 0x1429, 0x1c32: 0x1a31, 0x1c33: 0xbb61, 0x1c34: 0xbb79, 0x1c35: 0xbb91, + 0x1c36: 0xbba9, 0x1c37: 0xbbc1, 0x1c38: 0xbbd9, 0x1c39: 0x2109, 0x1c3a: 0x1111, 0x1c3b: 0xbbf1, + 0x1c3c: 0xbbf1, 0x1c3d: 0xbc09, 0x1c3e: 0xbc21, 0x1c3f: 0x10e1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x10f9, 0x1c41: 0xbc39, 0x1c42: 0x2079, 0x1c43: 0xbc71, 0x1c44: 0xbb19, 0x1c45: 0x1429, + 0x1c46: 0xbb61, 0x1c47: 0x10e1, 0x1c48: 0x1111, 0x1c49: 0x2109, 0x1c4a: 0xbc91, 0x1c4b: 0xbc91, + 0x1c4c: 0x0040, 0x1c4d: 0x0040, 0x1c4e: 0x1f41, 0x1c4f: 0x00c9, 0x1c50: 0x0069, 0x1c51: 0x0079, + 0x1c52: 0x1f51, 0x1c53: 0x1f61, 0x1c54: 0x1f71, 0x1c55: 0x1f81, 0x1c56: 0x1f91, 0x1c57: 0x1fa1, + 0x1c58: 0x1f41, 0x1c59: 0x00c9, 0x1c5a: 0x0069, 0x1c5b: 
0x0079, 0x1c5c: 0x1f51, 0x1c5d: 0x1f61, + 0x1c5e: 0x1f71, 0x1c5f: 0x1f81, 0x1c60: 0x1f91, 0x1c61: 0x1fa1, 0x1c62: 0x1f41, 0x1c63: 0x00c9, + 0x1c64: 0x0069, 0x1c65: 0x0079, 0x1c66: 0x1f51, 0x1c67: 0x1f61, 0x1c68: 0x1f71, 0x1c69: 0x1f81, + 0x1c6a: 0x1f91, 0x1c6b: 0x1fa1, 0x1c6c: 0x1f41, 0x1c6d: 0x00c9, 0x1c6e: 0x0069, 0x1c6f: 0x0079, + 0x1c70: 0x1f51, 0x1c71: 0x1f61, 0x1c72: 0x1f71, 0x1c73: 0x1f81, 0x1c74: 0x1f91, 0x1c75: 0x1fa1, + 0x1c76: 0x1f41, 0x1c77: 0x00c9, 0x1c78: 0x0069, 0x1c79: 0x0079, 0x1c7a: 0x1f51, 0x1c7b: 0x1f61, + 0x1c7c: 0x1f71, 0x1c7d: 0x1f81, 0x1c7e: 0x1f91, 0x1c7f: 0x1fa1, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xe115, 0x1c81: 0xe115, 0x1c82: 0xe135, 0x1c83: 0xe135, 0x1c84: 0xe115, 0x1c85: 0xe115, + 0x1c86: 0xe175, 0x1c87: 0xe175, 0x1c88: 0xe115, 0x1c89: 0xe115, 0x1c8a: 0xe135, 0x1c8b: 0xe135, + 0x1c8c: 0xe115, 0x1c8d: 0xe115, 0x1c8e: 0xe1f5, 0x1c8f: 0xe1f5, 0x1c90: 0xe115, 0x1c91: 0xe115, + 0x1c92: 0xe135, 0x1c93: 0xe135, 0x1c94: 0xe115, 0x1c95: 0xe115, 0x1c96: 0xe175, 0x1c97: 0xe175, + 0x1c98: 0xe115, 0x1c99: 0xe115, 0x1c9a: 0xe135, 0x1c9b: 0xe135, 0x1c9c: 0xe115, 0x1c9d: 0xe115, + 0x1c9e: 0x8b3d, 0x1c9f: 0x8b3d, 0x1ca0: 0x04b5, 0x1ca1: 0x04b5, 0x1ca2: 0x0a08, 0x1ca3: 0x0a08, + 0x1ca4: 0x0a08, 0x1ca5: 0x0a08, 0x1ca6: 0x0a08, 0x1ca7: 0x0a08, 0x1ca8: 0x0a08, 0x1ca9: 0x0a08, + 0x1caa: 0x0a08, 0x1cab: 0x0a08, 0x1cac: 0x0a08, 0x1cad: 0x0a08, 0x1cae: 0x0a08, 0x1caf: 0x0a08, + 0x1cb0: 0x0a08, 0x1cb1: 0x0a08, 0x1cb2: 0x0a08, 0x1cb3: 0x0a08, 0x1cb4: 0x0a08, 0x1cb5: 0x0a08, + 0x1cb6: 0x0a08, 0x1cb7: 0x0a08, 0x1cb8: 0x0a08, 0x1cb9: 0x0a08, 0x1cba: 0x0a08, 0x1cbb: 0x0a08, + 0x1cbc: 0x0a08, 0x1cbd: 0x0a08, 0x1cbe: 0x0a08, 0x1cbf: 0x0a08, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb1d9, 0x1cc1: 0xb1f1, 0x1cc2: 0xb251, 0x1cc3: 0xb299, 0x1cc4: 0x0040, 0x1cc5: 0xb461, + 0x1cc6: 0xb2e1, 0x1cc7: 0xb269, 0x1cc8: 0xb359, 0x1cc9: 0xb479, 0x1cca: 0xb3e9, 0x1ccb: 0xb401, + 0x1ccc: 0xb419, 0x1ccd: 0xb431, 0x1cce: 0xb2f9, 0x1ccf: 0xb389, 0x1cd0: 0xb3b9, 0x1cd1: 0xb329, + 0x1cd2: 0xb3d1, 0x1cd3: 0xb2c9, 0x1cd4: 0xb311, 0x1cd5: 0xb221, 0x1cd6: 0xb239, 0x1cd7: 0xb281, + 0x1cd8: 0xb2b1, 0x1cd9: 0xb341, 0x1cda: 0xb371, 0x1cdb: 0xb3a1, 0x1cdc: 0xbca9, 0x1cdd: 0x7999, + 0x1cde: 0xbcc1, 0x1cdf: 0xbcd9, 0x1ce0: 0x0040, 0x1ce1: 0xb1f1, 0x1ce2: 0xb251, 0x1ce3: 0x0040, + 0x1ce4: 0xb449, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb269, 0x1ce8: 0x0040, 0x1ce9: 0xb479, + 0x1cea: 0xb3e9, 0x1ceb: 0xb401, 0x1cec: 0xb419, 0x1ced: 0xb431, 0x1cee: 0xb2f9, 0x1cef: 0xb389, + 0x1cf0: 0xb3b9, 0x1cf1: 0xb329, 0x1cf2: 0xb3d1, 0x1cf3: 0x0040, 0x1cf4: 0xb311, 0x1cf5: 0xb221, + 0x1cf6: 0xb239, 0x1cf7: 0xb281, 0x1cf8: 0x0040, 0x1cf9: 0xb341, 0x1cfa: 0x0040, 0x1cfb: 0xb3a1, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0x0040, 0x1d02: 0xb251, 0x1d03: 0x0040, 0x1d04: 0x0040, 0x1d05: 0x0040, + 0x1d06: 0x0040, 0x1d07: 0xb269, 0x1d08: 0x0040, 0x1d09: 0xb479, 0x1d0a: 0x0040, 0x1d0b: 0xb401, + 0x1d0c: 0x0040, 0x1d0d: 0xb431, 0x1d0e: 0xb2f9, 0x1d0f: 0xb389, 0x1d10: 0x0040, 0x1d11: 0xb329, + 0x1d12: 0xb3d1, 0x1d13: 0x0040, 0x1d14: 0xb311, 0x1d15: 0x0040, 0x1d16: 0x0040, 0x1d17: 0xb281, + 0x1d18: 0x0040, 0x1d19: 0xb341, 0x1d1a: 0x0040, 0x1d1b: 0xb3a1, 0x1d1c: 0x0040, 0x1d1d: 0x7999, + 0x1d1e: 0x0040, 0x1d1f: 0xbcd9, 0x1d20: 0x0040, 0x1d21: 0xb1f1, 0x1d22: 0xb251, 0x1d23: 0x0040, + 0x1d24: 0xb449, 0x1d25: 0x0040, 0x1d26: 0x0040, 0x1d27: 0xb269, 0x1d28: 0xb359, 0x1d29: 0xb479, + 0x1d2a: 0xb3e9, 0x1d2b: 0x0040, 0x1d2c: 0xb419, 0x1d2d: 0xb431, 0x1d2e: 0xb2f9, 
0x1d2f: 0xb389, + 0x1d30: 0xb3b9, 0x1d31: 0xb329, 0x1d32: 0xb3d1, 0x1d33: 0x0040, 0x1d34: 0xb311, 0x1d35: 0xb221, + 0x1d36: 0xb239, 0x1d37: 0xb281, 0x1d38: 0x0040, 0x1d39: 0xb341, 0x1d3a: 0xb371, 0x1d3b: 0xb3a1, + 0x1d3c: 0xbca9, 0x1d3d: 0x0040, 0x1d3e: 0xbcc1, 0x1d3f: 0x0040, + // Block 0x75, offset 0x1d40 + 0x1d40: 0xb1d9, 0x1d41: 0xb1f1, 0x1d42: 0xb251, 0x1d43: 0xb299, 0x1d44: 0xb449, 0x1d45: 0xb461, + 0x1d46: 0xb2e1, 0x1d47: 0xb269, 0x1d48: 0xb359, 0x1d49: 0xb479, 0x1d4a: 0x0040, 0x1d4b: 0xb401, + 0x1d4c: 0xb419, 0x1d4d: 0xb431, 0x1d4e: 0xb2f9, 0x1d4f: 0xb389, 0x1d50: 0xb3b9, 0x1d51: 0xb329, + 0x1d52: 0xb3d1, 0x1d53: 0xb2c9, 0x1d54: 0xb311, 0x1d55: 0xb221, 0x1d56: 0xb239, 0x1d57: 0xb281, + 0x1d58: 0xb2b1, 0x1d59: 0xb341, 0x1d5a: 0xb371, 0x1d5b: 0xb3a1, 0x1d5c: 0x0040, 0x1d5d: 0x0040, + 0x1d5e: 0x0040, 0x1d5f: 0x0040, 0x1d60: 0x0040, 0x1d61: 0xb1f1, 0x1d62: 0xb251, 0x1d63: 0xb299, + 0x1d64: 0x0040, 0x1d65: 0xb461, 0x1d66: 0xb2e1, 0x1d67: 0xb269, 0x1d68: 0xb359, 0x1d69: 0xb479, + 0x1d6a: 0x0040, 0x1d6b: 0xb401, 0x1d6c: 0xb419, 0x1d6d: 0xb431, 0x1d6e: 0xb2f9, 0x1d6f: 0xb389, + 0x1d70: 0xb3b9, 0x1d71: 0xb329, 0x1d72: 0xb3d1, 0x1d73: 0xb2c9, 0x1d74: 0xb311, 0x1d75: 0xb221, + 0x1d76: 0xb239, 0x1d77: 0xb281, 0x1d78: 0xb2b1, 0x1d79: 0xb341, 0x1d7a: 0xb371, 0x1d7b: 0xb3a1, + 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x0040, 0x1d81: 0xbcf2, 0x1d82: 0xbd0a, 0x1d83: 0xbd22, 0x1d84: 0xbd3a, 0x1d85: 0xbd52, + 0x1d86: 0xbd6a, 0x1d87: 0xbd82, 0x1d88: 0xbd9a, 0x1d89: 0xbdb2, 0x1d8a: 0xbdca, 0x1d8b: 0x0018, + 0x1d8c: 0x0018, 0x1d8d: 0x0018, 0x1d8e: 0x0018, 0x1d8f: 0x0018, 0x1d90: 0xbde2, 0x1d91: 0xbe02, + 0x1d92: 0xbe22, 0x1d93: 0xbe42, 0x1d94: 0xbe62, 0x1d95: 0xbe82, 0x1d96: 0xbea2, 0x1d97: 0xbec2, + 0x1d98: 0xbee2, 0x1d99: 0xbf02, 0x1d9a: 0xbf22, 0x1d9b: 0xbf42, 0x1d9c: 0xbf62, 0x1d9d: 0xbf82, + 0x1d9e: 0xbfa2, 0x1d9f: 0xbfc2, 0x1da0: 0xbfe2, 0x1da1: 0xc002, 0x1da2: 0xc022, 0x1da3: 0xc042, + 0x1da4: 0xc062, 0x1da5: 0xc082, 0x1da6: 0xc0a2, 0x1da7: 0xc0c2, 0x1da8: 0xc0e2, 0x1da9: 0xc102, + 0x1daa: 0xc121, 0x1dab: 0x1159, 0x1dac: 0x0269, 0x1dad: 0x66a9, 0x1dae: 0xc161, 0x1daf: 0x0018, + 0x1db0: 0x0039, 0x1db1: 0x0ee9, 0x1db2: 0x1159, 0x1db3: 0x0ef9, 0x1db4: 0x0f09, 0x1db5: 0x1199, + 0x1db6: 0x0f31, 0x1db7: 0x0249, 0x1db8: 0x0f41, 0x1db9: 0x0259, 0x1dba: 0x0f51, 0x1dbb: 0x0359, + 0x1dbc: 0x0f61, 0x1dbd: 0x0f71, 0x1dbe: 0x00d9, 0x1dbf: 0x0f99, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0x2039, 0x1dc1: 0x0269, 0x1dc2: 0x01d9, 0x1dc3: 0x0fa9, 0x1dc4: 0x0fb9, 0x1dc5: 0x1089, + 0x1dc6: 0x0279, 0x1dc7: 0x0369, 0x1dc8: 0x0289, 0x1dc9: 0x13d1, 0x1dca: 0xc179, 0x1dcb: 0x65e9, + 0x1dcc: 0xc191, 0x1dcd: 0x1441, 0x1dce: 0xc1a9, 0x1dcf: 0xc1c9, 0x1dd0: 0x0018, 0x1dd1: 0x0018, + 0x1dd2: 0x0018, 0x1dd3: 0x0018, 0x1dd4: 0x0018, 0x1dd5: 0x0018, 0x1dd6: 0x0018, 0x1dd7: 0x0018, + 0x1dd8: 0x0018, 0x1dd9: 0x0018, 0x1dda: 0x0018, 0x1ddb: 0x0018, 0x1ddc: 0x0018, 0x1ddd: 0x0018, + 0x1dde: 0x0018, 0x1ddf: 0x0018, 0x1de0: 0x0018, 0x1de1: 0x0018, 0x1de2: 0x0018, 0x1de3: 0x0018, + 0x1de4: 0x0018, 0x1de5: 0x0018, 0x1de6: 0x0018, 0x1de7: 0x0018, 0x1de8: 0x0018, 0x1de9: 0x0018, + 0x1dea: 0xc1e1, 0x1deb: 0xc1f9, 0x1dec: 0xc211, 0x1ded: 0x0018, 0x1dee: 0x0018, 0x1def: 0x0018, + 0x1df0: 0x0018, 0x1df1: 0x0018, 0x1df2: 0x0018, 0x1df3: 0x0018, 0x1df4: 0x0018, 0x1df5: 0x0018, + 0x1df6: 0x0018, 0x1df7: 0x0018, 0x1df8: 0x0018, 0x1df9: 0x0018, 0x1dfa: 0x0018, 0x1dfb: 0x0018, + 0x1dfc: 0x0018, 0x1dfd: 0x0018, 0x1dfe: 0x0018, 0x1dff: 0x0018, + // Block 0x78, offset 0x1e00 + 0x1e00: 
0xc241, 0x1e01: 0xc279, 0x1e02: 0xc2b1, 0x1e03: 0x0040, 0x1e04: 0x0040, 0x1e05: 0x0040, + 0x1e06: 0x0040, 0x1e07: 0x0040, 0x1e08: 0x0040, 0x1e09: 0x0040, 0x1e0a: 0x0040, 0x1e0b: 0x0040, + 0x1e0c: 0x0040, 0x1e0d: 0x0040, 0x1e0e: 0x0040, 0x1e0f: 0x0040, 0x1e10: 0xc2d1, 0x1e11: 0xc2f1, + 0x1e12: 0xc311, 0x1e13: 0xc331, 0x1e14: 0xc351, 0x1e15: 0xc371, 0x1e16: 0xc391, 0x1e17: 0xc3b1, + 0x1e18: 0xc3d1, 0x1e19: 0xc3f1, 0x1e1a: 0xc411, 0x1e1b: 0xc431, 0x1e1c: 0xc451, 0x1e1d: 0xc471, + 0x1e1e: 0xc491, 0x1e1f: 0xc4b1, 0x1e20: 0xc4d1, 0x1e21: 0xc4f1, 0x1e22: 0xc511, 0x1e23: 0xc531, + 0x1e24: 0xc551, 0x1e25: 0xc571, 0x1e26: 0xc591, 0x1e27: 0xc5b1, 0x1e28: 0xc5d1, 0x1e29: 0xc5f1, + 0x1e2a: 0xc611, 0x1e2b: 0xc631, 0x1e2c: 0xc651, 0x1e2d: 0xc671, 0x1e2e: 0xc691, 0x1e2f: 0xc6b1, + 0x1e30: 0xc6d1, 0x1e31: 0xc6f1, 0x1e32: 0xc711, 0x1e33: 0xc731, 0x1e34: 0xc751, 0x1e35: 0xc771, + 0x1e36: 0xc791, 0x1e37: 0xc7b1, 0x1e38: 0xc7d1, 0x1e39: 0xc7f1, 0x1e3a: 0xc811, 0x1e3b: 0xc831, + 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xcb61, 0x1e41: 0xcb81, 0x1e42: 0xcba1, 0x1e43: 0x8b55, 0x1e44: 0xcbc1, 0x1e45: 0xcbe1, + 0x1e46: 0xcc01, 0x1e47: 0xcc21, 0x1e48: 0xcc41, 0x1e49: 0xcc61, 0x1e4a: 0xcc81, 0x1e4b: 0xcca1, + 0x1e4c: 0xccc1, 0x1e4d: 0x8b75, 0x1e4e: 0xcce1, 0x1e4f: 0xcd01, 0x1e50: 0xcd21, 0x1e51: 0xcd41, + 0x1e52: 0x8b95, 0x1e53: 0xcd61, 0x1e54: 0xcd81, 0x1e55: 0xc491, 0x1e56: 0x8bb5, 0x1e57: 0xcda1, + 0x1e58: 0xcdc1, 0x1e59: 0xcde1, 0x1e5a: 0xce01, 0x1e5b: 0xce21, 0x1e5c: 0x8bd5, 0x1e5d: 0xce41, + 0x1e5e: 0xce61, 0x1e5f: 0xce81, 0x1e60: 0xcea1, 0x1e61: 0xcec1, 0x1e62: 0xc7f1, 0x1e63: 0xcee1, + 0x1e64: 0xcf01, 0x1e65: 0xcf21, 0x1e66: 0xcf41, 0x1e67: 0xcf61, 0x1e68: 0xcf81, 0x1e69: 0xcfa1, + 0x1e6a: 0xcfc1, 0x1e6b: 0xcfe1, 0x1e6c: 0xd001, 0x1e6d: 0xd021, 0x1e6e: 0xd041, 0x1e6f: 0xd061, + 0x1e70: 0xd081, 0x1e71: 0xd0a1, 0x1e72: 0xd0a1, 0x1e73: 0xd0a1, 0x1e74: 0x8bf5, 0x1e75: 0xd0c1, + 0x1e76: 0xd0e1, 0x1e77: 0xd101, 0x1e78: 0x8c15, 0x1e79: 0xd121, 0x1e7a: 0xd141, 0x1e7b: 0xd161, + 0x1e7c: 0xd181, 0x1e7d: 0xd1a1, 0x1e7e: 0xd1c1, 0x1e7f: 0xd1e1, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd201, 0x1e81: 0xd221, 0x1e82: 0xd241, 0x1e83: 0xd261, 0x1e84: 0xd281, 0x1e85: 0xd2a1, + 0x1e86: 0xd2a1, 0x1e87: 0xd2c1, 0x1e88: 0xd2e1, 0x1e89: 0xd301, 0x1e8a: 0xd321, 0x1e8b: 0xd341, + 0x1e8c: 0xd361, 0x1e8d: 0xd381, 0x1e8e: 0xd3a1, 0x1e8f: 0xd3c1, 0x1e90: 0xd3e1, 0x1e91: 0xd401, + 0x1e92: 0xd421, 0x1e93: 0xd441, 0x1e94: 0xd461, 0x1e95: 0xd481, 0x1e96: 0xd4a1, 0x1e97: 0xd4c1, + 0x1e98: 0xd4e1, 0x1e99: 0x8c35, 0x1e9a: 0xd501, 0x1e9b: 0xd521, 0x1e9c: 0xd541, 0x1e9d: 0xc371, + 0x1e9e: 0xd561, 0x1e9f: 0xd581, 0x1ea0: 0x8c55, 0x1ea1: 0x8c75, 0x1ea2: 0xd5a1, 0x1ea3: 0xd5c1, + 0x1ea4: 0xd5e1, 0x1ea5: 0xd601, 0x1ea6: 0xd621, 0x1ea7: 0xd641, 0x1ea8: 0x2040, 0x1ea9: 0xd661, + 0x1eaa: 0xd681, 0x1eab: 0xd681, 0x1eac: 0x8c95, 0x1ead: 0xd6a1, 0x1eae: 0xd6c1, 0x1eaf: 0xd6e1, + 0x1eb0: 0xd701, 0x1eb1: 0x8cb5, 0x1eb2: 0xd721, 0x1eb3: 0xd741, 0x1eb4: 0x2040, 0x1eb5: 0xd761, + 0x1eb6: 0xd781, 0x1eb7: 0xd7a1, 0x1eb8: 0xd7c1, 0x1eb9: 0xd7e1, 0x1eba: 0xd801, 0x1ebb: 0x8cd5, + 0x1ebc: 0xd821, 0x1ebd: 0x8cf5, 0x1ebe: 0xd841, 0x1ebf: 0xd861, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xd881, 0x1ec1: 0xd8a1, 0x1ec2: 0xd8c1, 0x1ec3: 0xd8e1, 0x1ec4: 0xd901, 0x1ec5: 0xd921, + 0x1ec6: 0xd941, 0x1ec7: 0xd961, 0x1ec8: 0xd981, 0x1ec9: 0x8d15, 0x1eca: 0xd9a1, 0x1ecb: 0xd9c1, + 0x1ecc: 0xd9e1, 0x1ecd: 0xda01, 0x1ece: 0xda21, 0x1ecf: 0x8d35, 0x1ed0: 0xda41, 0x1ed1: 0x8d55, + 0x1ed2: 0x8d75, 0x1ed3: 0xda61, 
0x1ed4: 0xda81, 0x1ed5: 0xda81, 0x1ed6: 0xdaa1, 0x1ed7: 0x8d95, + 0x1ed8: 0x8db5, 0x1ed9: 0xdac1, 0x1eda: 0xdae1, 0x1edb: 0xdb01, 0x1edc: 0xdb21, 0x1edd: 0xdb41, + 0x1ede: 0xdb61, 0x1edf: 0xdb81, 0x1ee0: 0xdba1, 0x1ee1: 0xdbc1, 0x1ee2: 0xdbe1, 0x1ee3: 0xdc01, + 0x1ee4: 0x8dd5, 0x1ee5: 0xdc21, 0x1ee6: 0xdc41, 0x1ee7: 0xdc61, 0x1ee8: 0xdc81, 0x1ee9: 0xdc61, + 0x1eea: 0xdca1, 0x1eeb: 0xdcc1, 0x1eec: 0xdce1, 0x1eed: 0xdd01, 0x1eee: 0xdd21, 0x1eef: 0xdd41, + 0x1ef0: 0xdd61, 0x1ef1: 0xdd81, 0x1ef2: 0xdda1, 0x1ef3: 0xddc1, 0x1ef4: 0xdde1, 0x1ef5: 0xde01, + 0x1ef6: 0xde21, 0x1ef7: 0xde41, 0x1ef8: 0x8df5, 0x1ef9: 0xde61, 0x1efa: 0xde81, 0x1efb: 0xdea1, + 0x1efc: 0xdec1, 0x1efd: 0xdee1, 0x1efe: 0x8e15, 0x1eff: 0xdf01, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xe601, 0x1f01: 0xe621, 0x1f02: 0xe641, 0x1f03: 0xe661, 0x1f04: 0xe681, 0x1f05: 0xe6a1, + 0x1f06: 0x8f35, 0x1f07: 0xe6c1, 0x1f08: 0xe6e1, 0x1f09: 0xe701, 0x1f0a: 0xe721, 0x1f0b: 0xe741, + 0x1f0c: 0xe761, 0x1f0d: 0x8f55, 0x1f0e: 0xe781, 0x1f0f: 0xe7a1, 0x1f10: 0x8f75, 0x1f11: 0x8f95, + 0x1f12: 0xe7c1, 0x1f13: 0xe7e1, 0x1f14: 0xe801, 0x1f15: 0xe821, 0x1f16: 0xe841, 0x1f17: 0xe861, + 0x1f18: 0xe881, 0x1f19: 0xe8a1, 0x1f1a: 0xe8c1, 0x1f1b: 0x8fb5, 0x1f1c: 0xe8e1, 0x1f1d: 0x8fd5, + 0x1f1e: 0xe901, 0x1f1f: 0x2040, 0x1f20: 0xe921, 0x1f21: 0xe941, 0x1f22: 0xe961, 0x1f23: 0x8ff5, + 0x1f24: 0xe981, 0x1f25: 0xe9a1, 0x1f26: 0x9015, 0x1f27: 0x9035, 0x1f28: 0xe9c1, 0x1f29: 0xe9e1, + 0x1f2a: 0xea01, 0x1f2b: 0xea21, 0x1f2c: 0xea41, 0x1f2d: 0xea41, 0x1f2e: 0xea61, 0x1f2f: 0xea81, + 0x1f30: 0xeaa1, 0x1f31: 0xeac1, 0x1f32: 0xeae1, 0x1f33: 0xeb01, 0x1f34: 0xeb21, 0x1f35: 0x9055, + 0x1f36: 0xeb41, 0x1f37: 0x9075, 0x1f38: 0xeb61, 0x1f39: 0x9095, 0x1f3a: 0xeb81, 0x1f3b: 0x90b5, + 0x1f3c: 0x90d5, 0x1f3d: 0x90f5, 0x1f3e: 0xeba1, 0x1f3f: 0xebc1, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xebe1, 0x1f41: 0x9115, 0x1f42: 0x9135, 0x1f43: 0x9155, 0x1f44: 0x9175, 0x1f45: 0xec01, + 0x1f46: 0xec21, 0x1f47: 0xec21, 0x1f48: 0xec41, 0x1f49: 0xec61, 0x1f4a: 0xec81, 0x1f4b: 0xeca1, + 0x1f4c: 0xecc1, 0x1f4d: 0x9195, 0x1f4e: 0xece1, 0x1f4f: 0xed01, 0x1f50: 0xed21, 0x1f51: 0xed41, + 0x1f52: 0x91b5, 0x1f53: 0xed61, 0x1f54: 0x91d5, 0x1f55: 0x91f5, 0x1f56: 0xed81, 0x1f57: 0xeda1, + 0x1f58: 0xedc1, 0x1f59: 0xede1, 0x1f5a: 0xee01, 0x1f5b: 0xee21, 0x1f5c: 0x9215, 0x1f5d: 0x9235, + 0x1f5e: 0x9255, 0x1f5f: 0x2040, 0x1f60: 0xee41, 0x1f61: 0x9275, 0x1f62: 0xee61, 0x1f63: 0xee81, + 0x1f64: 0xeea1, 0x1f65: 0x9295, 0x1f66: 0xeec1, 0x1f67: 0xeee1, 0x1f68: 0xef01, 0x1f69: 0xef21, + 0x1f6a: 0xef41, 0x1f6b: 0x92b5, 0x1f6c: 0xef61, 0x1f6d: 0xef81, 0x1f6e: 0xefa1, 0x1f6f: 0xefc1, + 0x1f70: 0xefe1, 0x1f71: 0xf001, 0x1f72: 0x92d5, 0x1f73: 0x92f5, 0x1f74: 0xf021, 0x1f75: 0x9315, + 0x1f76: 0xf041, 0x1f77: 0x9335, 0x1f78: 0xf061, 0x1f79: 0xf081, 0x1f7a: 0xf0a1, 0x1f7b: 0x9355, + 0x1f7c: 0x9375, 0x1f7d: 0xf0c1, 0x1f7e: 0x9395, 0x1f7f: 0xf0e1, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xf721, 0x1f81: 0xf741, 0x1f82: 0xf761, 0x1f83: 0xf781, 0x1f84: 0xf7a1, 0x1f85: 0x9555, + 0x1f86: 0xf7c1, 0x1f87: 0xf7e1, 0x1f88: 0xf801, 0x1f89: 0xf821, 0x1f8a: 0xf841, 0x1f8b: 0x9575, + 0x1f8c: 0x9595, 0x1f8d: 0xf861, 0x1f8e: 0xf881, 0x1f8f: 0xf8a1, 0x1f90: 0xf8c1, 0x1f91: 0xf8e1, + 0x1f92: 0xf901, 0x1f93: 0x95b5, 0x1f94: 0xf921, 0x1f95: 0xf941, 0x1f96: 0xf961, 0x1f97: 0xf981, + 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0xf9a1, 0x1f9b: 0xf9c1, 0x1f9c: 0xf9e1, 0x1f9d: 0x9615, + 0x1f9e: 0xfa01, 0x1f9f: 0xfa21, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0xfa41, 0x1fa3: 0xfa61, + 0x1fa4: 0xfa81, 0x1fa5: 0x9655, 0x1fa6: 0xfaa1, 0x1fa7: 
0xfac1, 0x1fa8: 0xfae1, 0x1fa9: 0xfb01, + 0x1faa: 0xfb21, 0x1fab: 0xfb41, 0x1fac: 0xfb61, 0x1fad: 0x9675, 0x1fae: 0xfb81, 0x1faf: 0xfba1, + 0x1fb0: 0xfbc1, 0x1fb1: 0x9695, 0x1fb2: 0xfbe1, 0x1fb3: 0xfc01, 0x1fb4: 0xfc21, 0x1fb5: 0xfc41, + 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0xfc61, 0x1fb9: 0xfc81, 0x1fba: 0xfca1, 0x1fbb: 0x96d5, + 0x1fbc: 0xfcc1, 0x1fbd: 0x96f5, 0x1fbe: 0xfce1, 0x1fbf: 0xfce1, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0xfd01, 0x1fc1: 0x9715, 0x1fc2: 0xfd21, 0x1fc3: 0xfd41, 0x1fc4: 0xfd61, 0x1fc5: 0xfd81, + 0x1fc6: 0xfda1, 0x1fc7: 0xfdc1, 0x1fc8: 0xfde1, 0x1fc9: 0x9735, 0x1fca: 0xfe01, 0x1fcb: 0xfe21, + 0x1fcc: 0xfe41, 0x1fcd: 0xfe61, 0x1fce: 0xfe81, 0x1fcf: 0xfea1, 0x1fd0: 0x9755, 0x1fd1: 0xfec1, + 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0xfee1, 0x1fd6: 0xff01, 0x1fd7: 0xff21, + 0x1fd8: 0xff41, 0x1fd9: 0xff61, 0x1fda: 0xff81, 0x1fdb: 0xffa1, 0x1fdc: 0xffc1, 0x1fdd: 0x97d5, + 0x1fde: 0x0040, 0x1fdf: 0x0040, 0x1fe0: 0x0040, 0x1fe1: 0x0040, 0x1fe2: 0x0040, 0x1fe3: 0x0040, + 0x1fe4: 0x0040, 0x1fe5: 0x0040, 0x1fe6: 0x0040, 0x1fe7: 0x0040, 0x1fe8: 0x0040, 0x1fe9: 0x0040, + 0x1fea: 0x0040, 0x1feb: 0x0040, 0x1fec: 0x0040, 0x1fed: 0x0040, 0x1fee: 0x0040, 0x1fef: 0x0040, + 0x1ff0: 0x0040, 0x1ff1: 0x0040, 0x1ff2: 0x0040, 0x1ff3: 0x0040, 0x1ff4: 0x0040, 0x1ff5: 0x0040, + 0x1ff6: 0x0040, 0x1ff7: 0x0040, 0x1ff8: 0x0040, 0x1ff9: 0x0040, 0x1ffa: 0x0040, 0x1ffb: 0x0040, + 0x1ffc: 0x0040, 0x1ffd: 0x0040, 0x1ffe: 0x0040, 0x1fff: 0x0040, +} + +// idnaIndex: 37 blocks, 2368 entries, 4736 bytes +// Block 0 is the zero block. +var idnaIndex = [2368]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7e, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7f, 0xca: 0x80, 0xcb: 0x07, 0xcc: 0x81, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x82, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x83, 0xd6: 0x84, 0xd7: 0x85, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x86, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x87, 0xde: 0x88, 0xdf: 0x89, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1e, 0xf1: 0x1f, 0xf2: 0x1f, 0xf3: 0x21, 0xf4: 0x22, + // Block 0x4, offset 0x100 + 0x120: 0x8a, 0x121: 0x13, 0x122: 0x8b, 0x123: 0x8c, 0x124: 0x8d, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8e, + 0x130: 0x8f, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x90, 0x135: 0x21, 0x136: 0x91, 0x137: 0x92, + 0x138: 0x93, 0x139: 0x94, 0x13a: 0x22, 0x13b: 0x95, 0x13c: 0x96, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x97, + // Block 0x5, offset 0x140 + 0x140: 0x98, 0x141: 0x99, 0x142: 0x9a, 0x143: 0x9b, 0x144: 0x9c, 0x145: 0x9d, 0x146: 0x9e, 0x147: 0x9f, + 0x148: 0xa0, 0x149: 0xa1, 0x14a: 0xa2, 0x14b: 0xa3, 0x14c: 0xa4, 0x14d: 0xa5, 0x14e: 0xa6, 0x14f: 0xa7, + 0x150: 0xa8, 0x151: 0xa0, 0x152: 0xa0, 0x153: 0xa0, 0x154: 0xa0, 0x155: 0xa0, 0x156: 0xa0, 0x157: 0xa0, + 0x158: 0xa0, 0x159: 0xa9, 0x15a: 0xaa, 0x15b: 0xab, 0x15c: 0xac, 0x15d: 0xad, 0x15e: 0xae, 0x15f: 0xaf, + 0x160: 0xb0, 0x161: 0xb1, 0x162: 0xb2, 0x163: 0xb3, 0x164: 0xb4, 0x165: 0xb5, 0x166: 0xb6, 0x167: 0xb7, + 0x168: 0xb8, 0x169: 0xb9, 0x16a: 0xba, 0x16b: 0xbb, 0x16c: 0xbc, 0x16d: 0xbd, 0x16e: 0xbe, 0x16f: 0xbf, + 0x170: 0xc0, 0x171: 0xc1, 0x172: 0xc2, 0x173: 0xc3, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 
0xc4, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc5, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc6, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc7, 0x187: 0x9c, + 0x188: 0xc8, 0x189: 0xc9, 0x18a: 0x9c, 0x18b: 0x9c, 0x18c: 0xca, 0x18d: 0x9c, 0x18e: 0x9c, 0x18f: 0x9c, + 0x190: 0xcb, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9c, 0x195: 0x9c, 0x196: 0x9c, 0x197: 0x9c, + 0x198: 0x9c, 0x199: 0x9c, 0x19a: 0x9c, 0x19b: 0x9c, 0x19c: 0x9c, 0x19d: 0x9c, 0x19e: 0x9c, 0x19f: 0x9c, + 0x1a0: 0x9c, 0x1a1: 0x9c, 0x1a2: 0x9c, 0x1a3: 0x9c, 0x1a4: 0x9c, 0x1a5: 0x9c, 0x1a6: 0x9c, 0x1a7: 0x9c, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9c, 0x1ab: 0xce, 0x1ac: 0x9c, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0x9c, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0xa0, 0x1d1: 0xa0, 0x1d2: 0xa0, 0x1d3: 0xa0, 0x1d4: 0xa0, 0x1d5: 0xa0, 0x1d6: 0xa0, 0x1d7: 0xa0, + 0x1d8: 0xa0, 0x1d9: 0xa0, 0x1da: 0xa0, 0x1db: 0xa0, 0x1dc: 0xa0, 0x1dd: 0xa0, 0x1de: 0xa0, 0x1df: 0xa0, + 0x1e0: 0xa0, 0x1e1: 0xa0, 0x1e2: 0xa0, 0x1e3: 0xa0, 0x1e4: 0xa0, 0x1e5: 0xa0, 0x1e6: 0xa0, 0x1e7: 0xa0, + 0x1e8: 0xa0, 0x1e9: 0xa0, 0x1ea: 0xa0, 0x1eb: 0xa0, 0x1ec: 0xa0, 0x1ed: 0xa0, 0x1ee: 0xa0, 0x1ef: 0xa0, + 0x1f0: 0xa0, 0x1f1: 0xa0, 0x1f2: 0xa0, 0x1f3: 0xa0, 0x1f4: 0xa0, 0x1f5: 0xa0, 0x1f6: 0xa0, 0x1f7: 0xa0, + 0x1f8: 0xa0, 0x1f9: 0xa0, 0x1fa: 0xa0, 0x1fb: 0xa0, 0x1fc: 0xa0, 0x1fd: 0xa0, 0x1fe: 0xa0, 0x1ff: 0xa0, + // Block 0x8, offset 0x200 + 0x200: 0xa0, 0x201: 0xa0, 0x202: 0xa0, 0x203: 0xa0, 0x204: 0xa0, 0x205: 0xa0, 0x206: 0xa0, 0x207: 0xa0, + 0x208: 0xa0, 0x209: 0xa0, 0x20a: 0xa0, 0x20b: 0xa0, 0x20c: 0xa0, 0x20d: 0xa0, 0x20e: 0xa0, 0x20f: 0xa0, + 0x210: 0xa0, 0x211: 0xa0, 0x212: 0xa0, 0x213: 0xa0, 0x214: 0xa0, 0x215: 0xa0, 0x216: 0xa0, 0x217: 0xa0, + 0x218: 0xa0, 0x219: 0xa0, 0x21a: 0xa0, 0x21b: 0xa0, 0x21c: 0xa0, 0x21d: 0xa0, 0x21e: 0xa0, 0x21f: 0xa0, + 0x220: 0xa0, 0x221: 0xa0, 0x222: 0xa0, 0x223: 0xa0, 0x224: 0xa0, 0x225: 0xa0, 0x226: 0xa0, 0x227: 0xa0, + 0x228: 0xa0, 0x229: 0xa0, 0x22a: 0xa0, 0x22b: 0xa0, 0x22c: 0xa0, 0x22d: 0xa0, 0x22e: 0xa0, 0x22f: 0xa0, + 0x230: 0xa0, 0x231: 0xa0, 0x232: 0xa0, 0x233: 0xa0, 0x234: 0xa0, 0x235: 0xa0, 0x236: 0xa0, 0x237: 0x9c, + 0x238: 0xa0, 0x239: 0xa0, 0x23a: 0xa0, 0x23b: 0xa0, 0x23c: 0xa0, 0x23d: 0xa0, 0x23e: 0xa0, 0x23f: 0xa0, + // Block 0x9, offset 0x240 + 0x240: 0xa0, 0x241: 0xa0, 0x242: 0xa0, 0x243: 0xa0, 0x244: 0xa0, 0x245: 0xa0, 0x246: 0xa0, 0x247: 0xa0, + 0x248: 0xa0, 0x249: 0xa0, 0x24a: 0xa0, 0x24b: 0xa0, 0x24c: 0xa0, 0x24d: 0xa0, 0x24e: 0xa0, 0x24f: 0xa0, + 0x250: 0xa0, 0x251: 0xa0, 0x252: 0xa0, 0x253: 0xa0, 0x254: 0xa0, 0x255: 0xa0, 0x256: 0xa0, 0x257: 0xa0, + 0x258: 0xa0, 0x259: 0xa0, 0x25a: 0xa0, 0x25b: 0xa0, 0x25c: 0xa0, 0x25d: 0xa0, 0x25e: 0xa0, 0x25f: 0xa0, + 0x260: 0xa0, 0x261: 0xa0, 0x262: 0xa0, 0x263: 0xa0, 0x264: 0xa0, 0x265: 0xa0, 0x266: 0xa0, 0x267: 0xa0, + 0x268: 0xa0, 0x269: 0xa0, 0x26a: 0xa0, 0x26b: 0xa0, 0x26c: 0xa0, 0x26d: 0xa0, 0x26e: 0xa0, 0x26f: 0xa0, + 0x270: 0xa0, 0x271: 0xa0, 0x272: 0xa0, 0x273: 0xa0, 0x274: 0xa0, 0x275: 0xa0, 0x276: 0xa0, 0x277: 0xa0, + 0x278: 0xa0, 0x279: 0xa0, 0x27a: 0xa0, 
0x27b: 0xa0, 0x27c: 0xa0, 0x27d: 0xa0, 0x27e: 0xa0, 0x27f: 0xa0, + // Block 0xa, offset 0x280 + 0x280: 0xa0, 0x281: 0xa0, 0x282: 0xa0, 0x283: 0xa0, 0x284: 0xa0, 0x285: 0xa0, 0x286: 0xa0, 0x287: 0xa0, + 0x288: 0xa0, 0x289: 0xa0, 0x28a: 0xa0, 0x28b: 0xa0, 0x28c: 0xa0, 0x28d: 0xa0, 0x28e: 0xa0, 0x28f: 0xa0, + 0x290: 0xa0, 0x291: 0xa0, 0x292: 0xa0, 0x293: 0xa0, 0x294: 0xa0, 0x295: 0xa0, 0x296: 0xa0, 0x297: 0xa0, + 0x298: 0xa0, 0x299: 0xa0, 0x29a: 0xa0, 0x29b: 0xa0, 0x29c: 0xa0, 0x29d: 0xa0, 0x29e: 0xa0, 0x29f: 0xa0, + 0x2a0: 0xa0, 0x2a1: 0xa0, 0x2a2: 0xa0, 0x2a3: 0xa0, 0x2a4: 0xa0, 0x2a5: 0xa0, 0x2a6: 0xa0, 0x2a7: 0xa0, + 0x2a8: 0xa0, 0x2a9: 0xa0, 0x2aa: 0xa0, 0x2ab: 0xa0, 0x2ac: 0xa0, 0x2ad: 0xa0, 0x2ae: 0xa0, 0x2af: 0xa0, + 0x2b0: 0xa0, 0x2b1: 0xa0, 0x2b2: 0xa0, 0x2b3: 0xa0, 0x2b4: 0xa0, 0x2b5: 0xa0, 0x2b6: 0xa0, 0x2b7: 0xa0, + 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xa0, 0x2c1: 0xa0, 0x2c2: 0xa0, 0x2c3: 0xa0, 0x2c4: 0xa0, 0x2c5: 0xa0, 0x2c6: 0xa0, 0x2c7: 0xa0, + 0x2c8: 0xa0, 0x2c9: 0xa0, 0x2ca: 0xa0, 0x2cb: 0xa0, 0x2cc: 0xa0, 0x2cd: 0xa0, 0x2ce: 0xa0, 0x2cf: 0xa0, + 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0xa0, 0x2f1: 0xa0, 0x2f2: 0xa0, 0x2f3: 0xa0, 0x2f4: 0xa0, 0x2f5: 0xa0, 0x2f6: 0xa0, 0x2f7: 0xa0, + 0x2f8: 0xa0, 0x2f9: 0xa0, 0x2fa: 0xa0, 0x2fb: 0xa0, 0x2fc: 0xa0, 0x2fd: 0xa0, 0x2fe: 0xa0, 0x2ff: 0xa0, + // Block 0xc, offset 0x300 + 0x300: 0xa0, 0x301: 0xa0, 0x302: 0xa0, 0x303: 0xa0, 0x304: 0xa0, 0x305: 0xa0, 0x306: 0xa0, 0x307: 0xa0, + 0x308: 0xa0, 0x309: 0xa0, 0x30a: 0xa0, 0x30b: 0xa0, 0x30c: 0xa0, 0x30d: 0xa0, 0x30e: 0xa0, 0x30f: 0xa0, + 0x310: 0xa0, 0x311: 0xa0, 0x312: 0xa0, 0x313: 0xa0, 0x314: 0xa0, 0x315: 0xa0, 0x316: 0xa0, 0x317: 0xa0, + 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xfb, 0x341: 0xfb, 0x342: 0xfb, 0x343: 0xfb, 0x344: 0xfb, 0x345: 0xfb, 0x346: 0xfb, 0x347: 0xfb, + 0x348: 0xfb, 0x349: 0xfb, 0x34a: 0xfb, 0x34b: 0xfb, 0x34c: 0xfb, 0x34d: 0xfb, 0x34e: 0xfb, 0x34f: 0xfb, + 0x350: 0xfb, 0x351: 0xfb, 0x352: 0xfb, 0x353: 0xfb, 0x354: 0xfb, 0x355: 0xfb, 0x356: 0xfb, 0x357: 0xfb, + 0x358: 0xfb, 0x359: 0xfb, 0x35a: 0xfb, 0x35b: 0xfb, 0x35c: 0xfb, 0x35d: 0xfb, 0x35e: 0xfb, 0x35f: 0xfb, + 0x360: 0xfb, 0x361: 0xfb, 0x362: 0xfb, 0x363: 0xfb, 0x364: 0xfb, 0x365: 0xfb, 0x366: 0xfb, 0x367: 0xfb, + 0x368: 0xfb, 0x369: 0xfb, 0x36a: 0xfb, 0x36b: 0xfb, 0x36c: 0xfb, 0x36d: 0xfb, 0x36e: 0xfb, 0x36f: 0xfb, + 0x370: 0xfb, 0x371: 0xfb, 0x372: 0xfb, 0x373: 0xfb, 0x374: 0xfb, 0x375: 0xfb, 0x376: 0xfb, 0x377: 0xfb, + 0x378: 0xfb, 0x379: 0xfb, 0x37a: 0xfb, 0x37b: 0xfb, 0x37c: 0xfb, 0x37d: 0xfb, 0x37e: 0xfb, 0x37f: 0xfb, + // Block 0xe, offset 0x380 + 0x380: 0xfb, 0x381: 0xfb, 0x382: 0xfb, 0x383: 0xfb, 0x384: 0xfb, 0x385: 0xfb, 0x386: 0xfb, 0x387: 0xfb, + 0x388: 0xfb, 0x389: 0xfb, 0x38a: 0xfb, 0x38b: 0xfb, 0x38c: 0xfb, 0x38d: 0xfb, 0x38e: 0xfb, 0x38f: 0xfb, + 0x390: 0xfb, 0x391: 0xfb, 0x392: 0xfb, 0x393: 0xfb, 0x394: 0xfb, 0x395: 0xfb, 0x396: 0xfb, 0x397: 0xfb, + 0x398: 0xfb, 0x399: 0xfb, 0x39a: 0xfb, 0x39b: 0xfb, 
0x39c: 0xfb, 0x39d: 0xfb, 0x39e: 0xfb, 0x39f: 0xfb, + 0x3a0: 0xfb, 0x3a1: 0xfb, 0x3a2: 0xfb, 0x3a3: 0xfb, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x47, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x102, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x103, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0xa0, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9c, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xfb, 0x3c9: 0xfb, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0xa0, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xfb, 0x3d7: 0xfb, + 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xfb, 0x3df: 0xfb, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xfb, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5b, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5c, 0x3ef: 0xfb, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0x127, 0x3f5: 0xfb, 0x3f6: 0xfb, 0x3f7: 0xfb, + 0x3f8: 0xfb, 0x3f9: 0x128, 0x3fa: 0x129, 0x3fb: 0xfb, 0x3fc: 0x12a, 0x3fd: 0x12b, 0x3fe: 0x12c, 0x3ff: 0x12d, + // Block 0x10, offset 0x400 + 0x400: 0x12e, 0x401: 0x12f, 0x402: 0x130, 0x403: 0x131, 0x404: 0x132, 0x405: 0x133, 0x406: 0x134, 0x407: 0x135, + 0x408: 0x136, 0x409: 0xfb, 0x40a: 0x137, 0x40b: 0x138, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xfb, 0x40f: 0xfb, + 0x410: 0x139, 0x411: 0x13a, 0x412: 0x13b, 0x413: 0x13c, 0x414: 0xfb, 0x415: 0xfb, 0x416: 0x13d, 0x417: 0x13e, + 0x418: 0x13f, 0x419: 0x140, 0x41a: 0x141, 0x41b: 0x142, 0x41c: 0x143, 0x41d: 0xfb, 0x41e: 0xfb, 0x41f: 0xfb, + 0x420: 0x144, 0x421: 0xfb, 0x422: 0x145, 0x423: 0x146, 0x424: 0x5f, 0x425: 0x147, 0x426: 0x148, 0x427: 0x149, + 0x428: 0x14a, 0x429: 0x14b, 0x42a: 0x14c, 0x42b: 0x14d, 0x42c: 0xfb, 0x42d: 0xfb, 0x42e: 0xfb, 0x42f: 0xfb, + 0x430: 0x14e, 0x431: 0x14f, 0x432: 0x150, 0x433: 0xfb, 0x434: 0x151, 0x435: 0x152, 0x436: 0x153, 0x437: 0xfb, + 0x438: 0xfb, 0x439: 0xfb, 0x43a: 0xfb, 0x43b: 0x154, 0x43c: 0xfb, 0x43d: 0xfb, 0x43e: 0x155, 0x43f: 0x156, + // Block 0x11, offset 0x440 + 0x440: 0xa0, 0x441: 0xa0, 0x442: 0xa0, 0x443: 0xa0, 0x444: 0xa0, 0x445: 0xa0, 0x446: 0xa0, 0x447: 0xa0, + 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x157, 0x44f: 0xfb, + 0x450: 0x9c, 0x451: 0x158, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x159, 0x456: 0xfb, 0x457: 0xfb, + 0x458: 0xfb, 0x459: 0xfb, 0x45a: 0xfb, 0x45b: 0xfb, 0x45c: 0xfb, 0x45d: 0xfb, 0x45e: 0xfb, 0x45f: 0xfb, + 0x460: 0xfb, 0x461: 0xfb, 0x462: 0xfb, 0x463: 0xfb, 0x464: 0xfb, 0x465: 0xfb, 0x466: 0xfb, 0x467: 0xfb, + 0x468: 0xfb, 0x469: 0xfb, 0x46a: 0xfb, 0x46b: 0xfb, 0x46c: 0xfb, 0x46d: 0xfb, 0x46e: 0xfb, 0x46f: 0xfb, + 0x470: 0xfb, 0x471: 0xfb, 0x472: 0xfb, 0x473: 0xfb, 0x474: 0xfb, 0x475: 0xfb, 0x476: 0xfb, 0x477: 0xfb, + 0x478: 0xfb, 0x479: 0xfb, 0x47a: 0xfb, 0x47b: 0xfb, 0x47c: 0xfb, 0x47d: 0xfb, 0x47e: 0xfb, 0x47f: 0xfb, + // Block 0x12, offset 0x480 + 0x480: 0xa0, 0x481: 0xa0, 0x482: 0xa0, 0x483: 0xa0, 0x484: 0xa0, 0x485: 0xa0, 0x486: 0xa0, 0x487: 0xa0, + 0x488: 0xa0, 0x489: 0xa0, 0x48a: 0xa0, 0x48b: 0xa0, 0x48c: 0xa0, 0x48d: 0xa0, 0x48e: 0xa0, 0x48f: 0xa0, + 0x490: 0x15a, 0x491: 0xfb, 0x492: 0xfb, 0x493: 0xfb, 0x494: 0xfb, 0x495: 0xfb, 0x496: 0xfb, 0x497: 0xfb, + 
0x498: 0xfb, 0x499: 0xfb, 0x49a: 0xfb, 0x49b: 0xfb, 0x49c: 0xfb, 0x49d: 0xfb, 0x49e: 0xfb, 0x49f: 0xfb, + 0x4a0: 0xfb, 0x4a1: 0xfb, 0x4a2: 0xfb, 0x4a3: 0xfb, 0x4a4: 0xfb, 0x4a5: 0xfb, 0x4a6: 0xfb, 0x4a7: 0xfb, + 0x4a8: 0xfb, 0x4a9: 0xfb, 0x4aa: 0xfb, 0x4ab: 0xfb, 0x4ac: 0xfb, 0x4ad: 0xfb, 0x4ae: 0xfb, 0x4af: 0xfb, + 0x4b0: 0xfb, 0x4b1: 0xfb, 0x4b2: 0xfb, 0x4b3: 0xfb, 0x4b4: 0xfb, 0x4b5: 0xfb, 0x4b6: 0xfb, 0x4b7: 0xfb, + 0x4b8: 0xfb, 0x4b9: 0xfb, 0x4ba: 0xfb, 0x4bb: 0xfb, 0x4bc: 0xfb, 0x4bd: 0xfb, 0x4be: 0xfb, 0x4bf: 0xfb, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xfb, 0x4c1: 0xfb, 0x4c2: 0xfb, 0x4c3: 0xfb, 0x4c4: 0xfb, 0x4c5: 0xfb, 0x4c6: 0xfb, 0x4c7: 0xfb, + 0x4c8: 0xfb, 0x4c9: 0xfb, 0x4ca: 0xfb, 0x4cb: 0xfb, 0x4cc: 0xfb, 0x4cd: 0xfb, 0x4ce: 0xfb, 0x4cf: 0xfb, + 0x4d0: 0xa0, 0x4d1: 0xa0, 0x4d2: 0xa0, 0x4d3: 0xa0, 0x4d4: 0xa0, 0x4d5: 0xa0, 0x4d6: 0xa0, 0x4d7: 0xa0, + 0x4d8: 0xa0, 0x4d9: 0x15b, 0x4da: 0xfb, 0x4db: 0xfb, 0x4dc: 0xfb, 0x4dd: 0xfb, 0x4de: 0xfb, 0x4df: 0xfb, + 0x4e0: 0xfb, 0x4e1: 0xfb, 0x4e2: 0xfb, 0x4e3: 0xfb, 0x4e4: 0xfb, 0x4e5: 0xfb, 0x4e6: 0xfb, 0x4e7: 0xfb, + 0x4e8: 0xfb, 0x4e9: 0xfb, 0x4ea: 0xfb, 0x4eb: 0xfb, 0x4ec: 0xfb, 0x4ed: 0xfb, 0x4ee: 0xfb, 0x4ef: 0xfb, + 0x4f0: 0xfb, 0x4f1: 0xfb, 0x4f2: 0xfb, 0x4f3: 0xfb, 0x4f4: 0xfb, 0x4f5: 0xfb, 0x4f6: 0xfb, 0x4f7: 0xfb, + 0x4f8: 0xfb, 0x4f9: 0xfb, 0x4fa: 0xfb, 0x4fb: 0xfb, 0x4fc: 0xfb, 0x4fd: 0xfb, 0x4fe: 0xfb, 0x4ff: 0xfb, + // Block 0x14, offset 0x500 + 0x500: 0xfb, 0x501: 0xfb, 0x502: 0xfb, 0x503: 0xfb, 0x504: 0xfb, 0x505: 0xfb, 0x506: 0xfb, 0x507: 0xfb, + 0x508: 0xfb, 0x509: 0xfb, 0x50a: 0xfb, 0x50b: 0xfb, 0x50c: 0xfb, 0x50d: 0xfb, 0x50e: 0xfb, 0x50f: 0xfb, + 0x510: 0xfb, 0x511: 0xfb, 0x512: 0xfb, 0x513: 0xfb, 0x514: 0xfb, 0x515: 0xfb, 0x516: 0xfb, 0x517: 0xfb, + 0x518: 0xfb, 0x519: 0xfb, 0x51a: 0xfb, 0x51b: 0xfb, 0x51c: 0xfb, 0x51d: 0xfb, 0x51e: 0xfb, 0x51f: 0xfb, + 0x520: 0xa0, 0x521: 0xa0, 0x522: 0xa0, 0x523: 0xa0, 0x524: 0xa0, 0x525: 0xa0, 0x526: 0xa0, 0x527: 0xa0, + 0x528: 0x14d, 0x529: 0x15c, 0x52a: 0xfb, 0x52b: 0x15d, 0x52c: 0x15e, 0x52d: 0x15f, 0x52e: 0x160, 0x52f: 0xfb, + 0x530: 0xfb, 0x531: 0xfb, 0x532: 0xfb, 0x533: 0xfb, 0x534: 0xfb, 0x535: 0xfb, 0x536: 0xfb, 0x537: 0xfb, + 0x538: 0xfb, 0x539: 0x161, 0x53a: 0x162, 0x53b: 0xfb, 0x53c: 0xa0, 0x53d: 0x163, 0x53e: 0x164, 0x53f: 0x165, + // Block 0x15, offset 0x540 + 0x540: 0xa0, 0x541: 0xa0, 0x542: 0xa0, 0x543: 0xa0, 0x544: 0xa0, 0x545: 0xa0, 0x546: 0xa0, 0x547: 0xa0, + 0x548: 0xa0, 0x549: 0xa0, 0x54a: 0xa0, 0x54b: 0xa0, 0x54c: 0xa0, 0x54d: 0xa0, 0x54e: 0xa0, 0x54f: 0xa0, + 0x550: 0xa0, 0x551: 0xa0, 0x552: 0xa0, 0x553: 0xa0, 0x554: 0xa0, 0x555: 0xa0, 0x556: 0xa0, 0x557: 0xa0, + 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x166, + 0x560: 0xa0, 0x561: 0xa0, 0x562: 0xa0, 0x563: 0xa0, 0x564: 0xa0, 0x565: 0xa0, 0x566: 0xa0, 0x567: 0xa0, + 0x568: 0xa0, 0x569: 0xa0, 0x56a: 0xa0, 0x56b: 0xa0, 0x56c: 0xa0, 0x56d: 0xa0, 0x56e: 0xa0, 0x56f: 0xa0, + 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x167, 0x574: 0x168, 0x575: 0xfb, 0x576: 0xfb, 0x577: 0xfb, + 0x578: 0xfb, 0x579: 0xfb, 0x57a: 0xfb, 0x57b: 0xfb, 0x57c: 0xfb, 0x57d: 0xfb, 0x57e: 0xfb, 0x57f: 0xfb, + // Block 0x16, offset 0x580 + 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x169, 0x585: 0x16a, 0x586: 0xa0, 0x587: 0xa0, + 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16b, 0x58c: 0xfb, 0x58d: 0xfb, 0x58e: 0xfb, 0x58f: 0xfb, + 0x590: 0xfb, 0x591: 0xfb, 0x592: 0xfb, 0x593: 0xfb, 0x594: 0xfb, 0x595: 0xfb, 0x596: 0xfb, 0x597: 0xfb, + 0x598: 0xfb, 0x599: 
0xfb, 0x59a: 0xfb, 0x59b: 0xfb, 0x59c: 0xfb, 0x59d: 0xfb, 0x59e: 0xfb, 0x59f: 0xfb, + 0x5a0: 0xfb, 0x5a1: 0xfb, 0x5a2: 0xfb, 0x5a3: 0xfb, 0x5a4: 0xfb, 0x5a5: 0xfb, 0x5a6: 0xfb, 0x5a7: 0xfb, + 0x5a8: 0xfb, 0x5a9: 0xfb, 0x5aa: 0xfb, 0x5ab: 0xfb, 0x5ac: 0xfb, 0x5ad: 0xfb, 0x5ae: 0xfb, 0x5af: 0xfb, + 0x5b0: 0xa0, 0x5b1: 0x16c, 0x5b2: 0x16d, 0x5b3: 0xfb, 0x5b4: 0xfb, 0x5b5: 0xfb, 0x5b6: 0xfb, 0x5b7: 0xfb, + 0x5b8: 0xfb, 0x5b9: 0xfb, 0x5ba: 0xfb, 0x5bb: 0xfb, 0x5bc: 0xfb, 0x5bd: 0xfb, 0x5be: 0xfb, 0x5bf: 0xfb, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x16e, 0x5c4: 0x16f, 0x5c5: 0x170, 0x5c6: 0x171, 0x5c7: 0x172, + 0x5c8: 0x9c, 0x5c9: 0x173, 0x5ca: 0xfb, 0x5cb: 0x174, 0x5cc: 0x9c, 0x5cd: 0x175, 0x5ce: 0xfb, 0x5cf: 0xfb, + 0x5d0: 0x60, 0x5d1: 0x61, 0x5d2: 0x62, 0x5d3: 0x63, 0x5d4: 0x64, 0x5d5: 0x65, 0x5d6: 0x66, 0x5d7: 0x67, + 0x5d8: 0x68, 0x5d9: 0x69, 0x5da: 0x6a, 0x5db: 0x6b, 0x5dc: 0x6c, 0x5dd: 0x6d, 0x5de: 0x6e, 0x5df: 0x6f, + 0x5e0: 0x9c, 0x5e1: 0x9c, 0x5e2: 0x9c, 0x5e3: 0x9c, 0x5e4: 0x9c, 0x5e5: 0x9c, 0x5e6: 0x9c, 0x5e7: 0x9c, + 0x5e8: 0x176, 0x5e9: 0x177, 0x5ea: 0x178, 0x5eb: 0xfb, 0x5ec: 0xfb, 0x5ed: 0xfb, 0x5ee: 0xfb, 0x5ef: 0xfb, + 0x5f0: 0xfb, 0x5f1: 0xfb, 0x5f2: 0xfb, 0x5f3: 0xfb, 0x5f4: 0xfb, 0x5f5: 0xfb, 0x5f6: 0xfb, 0x5f7: 0xfb, + 0x5f8: 0xfb, 0x5f9: 0xfb, 0x5fa: 0xfb, 0x5fb: 0xfb, 0x5fc: 0xfb, 0x5fd: 0xfb, 0x5fe: 0xfb, 0x5ff: 0xfb, + // Block 0x18, offset 0x600 + 0x600: 0x179, 0x601: 0xfb, 0x602: 0xfb, 0x603: 0xfb, 0x604: 0x17a, 0x605: 0x17b, 0x606: 0xfb, 0x607: 0xfb, + 0x608: 0xfb, 0x609: 0xfb, 0x60a: 0xfb, 0x60b: 0x17c, 0x60c: 0xfb, 0x60d: 0xfb, 0x60e: 0xfb, 0x60f: 0xfb, + 0x610: 0xfb, 0x611: 0xfb, 0x612: 0xfb, 0x613: 0xfb, 0x614: 0xfb, 0x615: 0xfb, 0x616: 0xfb, 0x617: 0xfb, + 0x618: 0xfb, 0x619: 0xfb, 0x61a: 0xfb, 0x61b: 0xfb, 0x61c: 0xfb, 0x61d: 0xfb, 0x61e: 0xfb, 0x61f: 0xfb, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x17d, 0x624: 0x70, 0x625: 0x17e, 0x626: 0xfb, 0x627: 0xfb, + 0x628: 0xfb, 0x629: 0xfb, 0x62a: 0xfb, 0x62b: 0xfb, 0x62c: 0xfb, 0x62d: 0xfb, 0x62e: 0xfb, 0x62f: 0xfb, + 0x630: 0xfb, 0x631: 0x17f, 0x632: 0x180, 0x633: 0xfb, 0x634: 0x181, 0x635: 0xfb, 0x636: 0xfb, 0x637: 0xfb, + 0x638: 0x71, 0x639: 0x72, 0x63a: 0x73, 0x63b: 0x182, 0x63c: 0xfb, 0x63d: 0xfb, 0x63e: 0xfb, 0x63f: 0xfb, + // Block 0x19, offset 0x640 + 0x640: 0x183, 0x641: 0x9c, 0x642: 0x184, 0x643: 0x185, 0x644: 0x74, 0x645: 0x75, 0x646: 0x186, 0x647: 0x187, + 0x648: 0x76, 0x649: 0x188, 0x64a: 0xfb, 0x64b: 0xfb, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, + 0x650: 0x9c, 0x651: 0x9c, 0x652: 0x9c, 0x653: 0x9c, 0x654: 0x9c, 0x655: 0x9c, 0x656: 0x9c, 0x657: 0x9c, + 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x189, 0x65c: 0x9c, 0x65d: 0x18a, 0x65e: 0x9c, 0x65f: 0x18b, + 0x660: 0x18c, 0x661: 0x18d, 0x662: 0x18e, 0x663: 0xfb, 0x664: 0x9c, 0x665: 0x18f, 0x666: 0x9c, 0x667: 0x190, + 0x668: 0x9c, 0x669: 0x191, 0x66a: 0x192, 0x66b: 0x193, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x194, 0x66f: 0x195, + 0x670: 0xfb, 0x671: 0xfb, 0x672: 0xfb, 0x673: 0xfb, 0x674: 0xfb, 0x675: 0xfb, 0x676: 0xfb, 0x677: 0xfb, + 0x678: 0xfb, 0x679: 0xfb, 0x67a: 0xfb, 0x67b: 0xfb, 0x67c: 0xfb, 0x67d: 0xfb, 0x67e: 0xfb, 0x67f: 0xfb, + // Block 0x1a, offset 0x680 + 0x680: 0xa0, 0x681: 0xa0, 0x682: 0xa0, 0x683: 0xa0, 0x684: 0xa0, 0x685: 0xa0, 0x686: 0xa0, 0x687: 0xa0, + 0x688: 0xa0, 0x689: 0xa0, 0x68a: 0xa0, 0x68b: 0xa0, 0x68c: 0xa0, 0x68d: 0xa0, 0x68e: 0xa0, 0x68f: 0xa0, + 0x690: 0xa0, 0x691: 0xa0, 0x692: 0xa0, 0x693: 0xa0, 0x694: 0xa0, 0x695: 0xa0, 0x696: 0xa0, 0x697: 0xa0, + 0x698: 0xa0, 
0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x196, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, + 0x6a0: 0xa0, 0x6a1: 0xa0, 0x6a2: 0xa0, 0x6a3: 0xa0, 0x6a4: 0xa0, 0x6a5: 0xa0, 0x6a6: 0xa0, 0x6a7: 0xa0, + 0x6a8: 0xa0, 0x6a9: 0xa0, 0x6aa: 0xa0, 0x6ab: 0xa0, 0x6ac: 0xa0, 0x6ad: 0xa0, 0x6ae: 0xa0, 0x6af: 0xa0, + 0x6b0: 0xa0, 0x6b1: 0xa0, 0x6b2: 0xa0, 0x6b3: 0xa0, 0x6b4: 0xa0, 0x6b5: 0xa0, 0x6b6: 0xa0, 0x6b7: 0xa0, + 0x6b8: 0xa0, 0x6b9: 0xa0, 0x6ba: 0xa0, 0x6bb: 0xa0, 0x6bc: 0xa0, 0x6bd: 0xa0, 0x6be: 0xa0, 0x6bf: 0xa0, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0xa0, 0x6c1: 0xa0, 0x6c2: 0xa0, 0x6c3: 0xa0, 0x6c4: 0xa0, 0x6c5: 0xa0, 0x6c6: 0xa0, 0x6c7: 0xa0, + 0x6c8: 0xa0, 0x6c9: 0xa0, 0x6ca: 0xa0, 0x6cb: 0xa0, 0x6cc: 0xa0, 0x6cd: 0xa0, 0x6ce: 0xa0, 0x6cf: 0xa0, + 0x6d0: 0xa0, 0x6d1: 0xa0, 0x6d2: 0xa0, 0x6d3: 0xa0, 0x6d4: 0xa0, 0x6d5: 0xa0, 0x6d6: 0xa0, 0x6d7: 0xa0, + 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x197, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, + 0x6e0: 0x198, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, + 0x6e8: 0xa0, 0x6e9: 0xa0, 0x6ea: 0xa0, 0x6eb: 0xa0, 0x6ec: 0xa0, 0x6ed: 0xa0, 0x6ee: 0xa0, 0x6ef: 0xa0, + 0x6f0: 0xa0, 0x6f1: 0xa0, 0x6f2: 0xa0, 0x6f3: 0xa0, 0x6f4: 0xa0, 0x6f5: 0xa0, 0x6f6: 0xa0, 0x6f7: 0xa0, + 0x6f8: 0xa0, 0x6f9: 0xa0, 0x6fa: 0xa0, 0x6fb: 0xa0, 0x6fc: 0xa0, 0x6fd: 0xa0, 0x6fe: 0xa0, 0x6ff: 0xa0, + // Block 0x1c, offset 0x700 + 0x700: 0xa0, 0x701: 0xa0, 0x702: 0xa0, 0x703: 0xa0, 0x704: 0xa0, 0x705: 0xa0, 0x706: 0xa0, 0x707: 0xa0, + 0x708: 0xa0, 0x709: 0xa0, 0x70a: 0xa0, 0x70b: 0xa0, 0x70c: 0xa0, 0x70d: 0xa0, 0x70e: 0xa0, 0x70f: 0xa0, + 0x710: 0xa0, 0x711: 0xa0, 0x712: 0xa0, 0x713: 0xa0, 0x714: 0xa0, 0x715: 0xa0, 0x716: 0xa0, 0x717: 0xa0, + 0x718: 0xa0, 0x719: 0xa0, 0x71a: 0xa0, 0x71b: 0xa0, 0x71c: 0xa0, 0x71d: 0xa0, 0x71e: 0xa0, 0x71f: 0xa0, + 0x720: 0xa0, 0x721: 0xa0, 0x722: 0xa0, 0x723: 0xa0, 0x724: 0xa0, 0x725: 0xa0, 0x726: 0xa0, 0x727: 0xa0, + 0x728: 0xa0, 0x729: 0xa0, 0x72a: 0xa0, 0x72b: 0xa0, 0x72c: 0xa0, 0x72d: 0xa0, 0x72e: 0xa0, 0x72f: 0xa0, + 0x730: 0xa0, 0x731: 0xa0, 0x732: 0xa0, 0x733: 0xa0, 0x734: 0xa0, 0x735: 0xa0, 0x736: 0xa0, 0x737: 0xa0, + 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x199, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, + // Block 0x1d, offset 0x740 + 0x740: 0xa0, 0x741: 0xa0, 0x742: 0xa0, 0x743: 0xa0, 0x744: 0xa0, 0x745: 0xa0, 0x746: 0xa0, 0x747: 0xa0, + 0x748: 0xa0, 0x749: 0xa0, 0x74a: 0xa0, 0x74b: 0xa0, 0x74c: 0xa0, 0x74d: 0xa0, 0x74e: 0xa0, 0x74f: 0xa0, + 0x750: 0xa0, 0x751: 0xa0, 0x752: 0xa0, 0x753: 0xa0, 0x754: 0xa0, 0x755: 0xa0, 0x756: 0xa0, 0x757: 0xa0, + 0x758: 0xa0, 0x759: 0xa0, 0x75a: 0xa0, 0x75b: 0xa0, 0x75c: 0xa0, 0x75d: 0xa0, 0x75e: 0xa0, 0x75f: 0xa0, + 0x760: 0xa0, 0x761: 0xa0, 0x762: 0xa0, 0x763: 0xa0, 0x764: 0xa0, 0x765: 0xa0, 0x766: 0xa0, 0x767: 0xa0, + 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19a, + 0x770: 0xfb, 0x771: 0xfb, 0x772: 0xfb, 0x773: 0xfb, 0x774: 0xfb, 0x775: 0xfb, 0x776: 0xfb, 0x777: 0xfb, + 0x778: 0xfb, 0x779: 0xfb, 0x77a: 0xfb, 0x77b: 0xfb, 0x77c: 0xfb, 0x77d: 0xfb, 0x77e: 0xfb, 0x77f: 0xfb, + // Block 0x1e, offset 0x780 + 0x780: 0xfb, 0x781: 0xfb, 0x782: 0xfb, 0x783: 0xfb, 0x784: 0xfb, 0x785: 0xfb, 0x786: 0xfb, 0x787: 0xfb, + 0x788: 0xfb, 0x789: 0xfb, 0x78a: 0xfb, 0x78b: 0xfb, 0x78c: 0xfb, 0x78d: 0xfb, 0x78e: 0xfb, 0x78f: 0xfb, + 0x790: 0xfb, 0x791: 0xfb, 0x792: 0xfb, 0x793: 0xfb, 0x794: 0xfb, 0x795: 0xfb, 0x796: 0xfb, 0x797: 0xfb, + 0x798: 0xfb, 0x799: 0xfb, 0x79a: 0xfb, 0x79b: 
0xfb, 0x79c: 0xfb, 0x79d: 0xfb, 0x79e: 0xfb, 0x79f: 0xfb, + 0x7a0: 0x77, 0x7a1: 0x78, 0x7a2: 0x79, 0x7a3: 0x19b, 0x7a4: 0x7a, 0x7a5: 0x7b, 0x7a6: 0x19c, 0x7a7: 0x7c, + 0x7a8: 0x7d, 0x7a9: 0xfb, 0x7aa: 0xfb, 0x7ab: 0xfb, 0x7ac: 0xfb, 0x7ad: 0xfb, 0x7ae: 0xfb, 0x7af: 0xfb, + 0x7b0: 0xfb, 0x7b1: 0xfb, 0x7b2: 0xfb, 0x7b3: 0xfb, 0x7b4: 0xfb, 0x7b5: 0xfb, 0x7b6: 0xfb, 0x7b7: 0xfb, + 0x7b8: 0xfb, 0x7b9: 0xfb, 0x7ba: 0xfb, 0x7bb: 0xfb, 0x7bc: 0xfb, 0x7bd: 0xfb, 0x7be: 0xfb, 0x7bf: 0xfb, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0xa0, 0x7c1: 0xa0, 0x7c2: 0xa0, 0x7c3: 0xa0, 0x7c4: 0xa0, 0x7c5: 0xa0, 0x7c6: 0xa0, 0x7c7: 0xa0, + 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfb, 0x7cf: 0xfb, + 0x7d0: 0xfb, 0x7d1: 0xfb, 0x7d2: 0xfb, 0x7d3: 0xfb, 0x7d4: 0xfb, 0x7d5: 0xfb, 0x7d6: 0xfb, 0x7d7: 0xfb, + 0x7d8: 0xfb, 0x7d9: 0xfb, 0x7da: 0xfb, 0x7db: 0xfb, 0x7dc: 0xfb, 0x7dd: 0xfb, 0x7de: 0xfb, 0x7df: 0xfb, + 0x7e0: 0xfb, 0x7e1: 0xfb, 0x7e2: 0xfb, 0x7e3: 0xfb, 0x7e4: 0xfb, 0x7e5: 0xfb, 0x7e6: 0xfb, 0x7e7: 0xfb, + 0x7e8: 0xfb, 0x7e9: 0xfb, 0x7ea: 0xfb, 0x7eb: 0xfb, 0x7ec: 0xfb, 0x7ed: 0xfb, 0x7ee: 0xfb, 0x7ef: 0xfb, + 0x7f0: 0xfb, 0x7f1: 0xfb, 0x7f2: 0xfb, 0x7f3: 0xfb, 0x7f4: 0xfb, 0x7f5: 0xfb, 0x7f6: 0xfb, 0x7f7: 0xfb, + 0x7f8: 0xfb, 0x7f9: 0xfb, 0x7fa: 0xfb, 0x7fb: 0xfb, 0x7fc: 0xfb, 0x7fd: 0xfb, 0x7fe: 0xfb, 0x7ff: 0xfb, + // Block 0x20, offset 0x800 + 0x810: 0x0d, 0x811: 0x0e, 0x812: 0x0f, 0x813: 0x10, 0x814: 0x11, 0x815: 0x0b, 0x816: 0x12, 0x817: 0x07, + 0x818: 0x13, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x14, 0x81c: 0x0b, 0x81d: 0x15, 0x81e: 0x16, 0x81f: 0x17, + 0x820: 0x07, 0x821: 0x07, 0x822: 0x07, 0x823: 0x07, 0x824: 0x07, 0x825: 0x07, 0x826: 0x07, 0x827: 0x07, + 0x828: 0x07, 0x829: 0x07, 0x82a: 0x18, 0x82b: 0x19, 0x82c: 0x1a, 0x82d: 0x07, 0x82e: 0x1b, 0x82f: 0x1c, + 0x830: 0x07, 0x831: 0x1d, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x0b, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfb, 0x883: 0xfb, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, + 0x888: 0xfb, 0x889: 0xfb, 0x88a: 0xfb, 0x88b: 0xfb, 0x88c: 0xfb, 0x88d: 0xfb, 0x88e: 0xfb, 0x88f: 0xfb, + 0x890: 0xfb, 0x891: 0xfb, 0x892: 0xfb, 0x893: 0xfb, 0x894: 0xfb, 0x895: 0xfb, 0x896: 0xfb, 0x897: 0xfb, + 0x898: 0xfb, 0x899: 0xfb, 0x89a: 0xfb, 0x89b: 0xfb, 0x89c: 0xfb, 0x89d: 0xfb, 0x89e: 0xfb, 0x89f: 0xfb, + 0x8a0: 0xfb, 0x8a1: 0xfb, 0x8a2: 0xfb, 0x8a3: 0xfb, 0x8a4: 0xfb, 0x8a5: 0xfb, 0x8a6: 0xfb, 0x8a7: 0xfb, + 0x8a8: 0xfb, 0x8a9: 0xfb, 0x8aa: 0xfb, 0x8ab: 0xfb, 0x8ac: 0xfb, 0x8ad: 0xfb, 
0x8ae: 0xfb, 0x8af: 0xfb, + 0x8b0: 0xfb, 0x8b1: 0xfb, 0x8b2: 0xfb, 0x8b3: 0xfb, 0x8b4: 0xfb, 0x8b5: 0xfb, 0x8b6: 0xfb, 0x8b7: 0xfb, + 0x8b8: 0xfb, 0x8b9: 0xfb, 0x8ba: 0xfb, 0x8bb: 0xfb, 0x8bc: 0xfb, 0x8bd: 0xfb, 0x8be: 0xfb, 0x8bf: 0xfb, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, + 0x8d0: 0x0b, 0x8d1: 0x0b, 0x8d2: 0x0b, 0x8d3: 0x0b, 0x8d4: 0x0b, 0x8d5: 0x0b, 0x8d6: 0x0b, 0x8d7: 0x0b, + 0x8d8: 0x0b, 0x8d9: 0x0b, 0x8da: 0x0b, 0x8db: 0x0b, 0x8dc: 0x0b, 0x8dd: 0x0b, 0x8de: 0x0b, 0x8df: 0x0b, + 0x8e0: 0x20, 0x8e1: 0x0b, 0x8e2: 0x0b, 0x8e3: 0x0b, 0x8e4: 0x0b, 0x8e5: 0x0b, 0x8e6: 0x0b, 0x8e7: 0x0b, + 0x8e8: 0x0b, 0x8e9: 0x0b, 0x8ea: 0x0b, 0x8eb: 0x0b, 0x8ec: 0x0b, 0x8ed: 0x0b, 0x8ee: 0x0b, 0x8ef: 0x0b, + 0x8f0: 0x0b, 0x8f1: 0x0b, 0x8f2: 0x0b, 0x8f3: 0x0b, 0x8f4: 0x0b, 0x8f5: 0x0b, 0x8f6: 0x0b, 0x8f7: 0x0b, + 0x8f8: 0x0b, 0x8f9: 0x0b, 0x8fa: 0x0b, 0x8fb: 0x0b, 0x8fc: 0x0b, 0x8fd: 0x0b, 0x8fe: 0x0b, 0x8ff: 0x0b, + // Block 0x24, offset 0x900 + 0x900: 0x0b, 0x901: 0x0b, 0x902: 0x0b, 0x903: 0x0b, 0x904: 0x0b, 0x905: 0x0b, 0x906: 0x0b, 0x907: 0x0b, + 0x908: 0x0b, 0x909: 0x0b, 0x90a: 0x0b, 0x90b: 0x0b, 0x90c: 0x0b, 0x90d: 0x0b, 0x90e: 0x0b, 0x90f: 0x0b, +} + +// idnaSparseOffset: 292 entries, 584 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x330, 0x333, 0x337, 0x33c, 0x341, 0x347, 0x358, 0x368, 0x36e, 0x372, 0x381, 0x386, 0x38e, 0x398, 0x3a3, 0x3ab, 0x3bc, 0x3c5, 0x3d5, 0x3e2, 0x3ee, 0x3f3, 0x400, 0x404, 0x409, 0x40b, 0x40d, 0x411, 0x413, 0x417, 0x420, 0x426, 0x42a, 0x43a, 0x444, 0x449, 0x44c, 0x452, 0x459, 0x45e, 0x462, 0x468, 0x46d, 0x476, 0x47b, 0x481, 0x488, 0x48f, 0x496, 0x49a, 0x49f, 0x4a2, 0x4a7, 0x4b3, 0x4b9, 0x4be, 0x4c5, 0x4cd, 0x4d2, 0x4d6, 0x4e6, 0x4ed, 0x4f1, 0x4f5, 0x4fc, 0x4fe, 0x501, 0x504, 0x508, 0x511, 0x515, 0x51d, 0x525, 0x52d, 0x539, 0x545, 0x54b, 0x554, 0x560, 0x567, 0x570, 0x57b, 0x582, 0x591, 0x59e, 0x5ab, 0x5b4, 0x5b8, 0x5c7, 0x5cf, 0x5da, 0x5e3, 0x5e9, 0x5f1, 0x5fa, 0x605, 0x608, 0x614, 0x61d, 0x620, 0x625, 0x62e, 0x633, 0x640, 0x64b, 0x654, 0x65e, 0x661, 0x66b, 0x674, 0x680, 0x68d, 0x69a, 0x6a8, 0x6af, 0x6b3, 0x6b7, 0x6ba, 0x6bf, 0x6c2, 0x6c7, 0x6ca, 0x6d1, 0x6d8, 0x6dc, 0x6e7, 0x6ea, 0x6ed, 0x6f0, 0x6f6, 0x6fc, 0x705, 0x708, 0x70b, 0x70e, 0x711, 0x718, 0x71b, 0x720, 0x72a, 0x72d, 0x731, 0x740, 0x74c, 0x750, 0x755, 0x759, 0x75e, 0x762, 0x767, 0x770, 0x77b, 0x781, 0x787, 0x78d, 0x793, 0x79c, 0x79f, 0x7a2, 0x7a6, 0x7aa, 0x7ae, 0x7b4, 0x7ba, 0x7bf, 0x7c2, 0x7d2, 0x7d9, 0x7dc, 0x7e1, 0x7e5, 0x7eb, 0x7f2, 0x7f6, 0x7fa, 0x803, 0x80a, 0x80f, 0x813, 0x821, 0x824, 0x827, 0x82b, 0x82f, 0x832, 0x842, 0x853, 0x856, 0x85b, 0x85d, 0x85f} + +// idnaSparseValues: 2146 entries, 8584 bytes +var idnaSparseValues = [2146]valueRange{ + // 
Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, 
hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x05}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + 
{value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, 
hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + 
{value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 
0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, 
lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x02}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0x3e, offset 0x225 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22f + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23b + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x247 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x253 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25b + {value: 0x0000, lo: 0x04}, 
+ {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x260 + {value: 0x0000, lo: 0x0c}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x059d, lo: 0x90, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x059d, lo: 0xbd, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x46, offset 0x27e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x282 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x28d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x291 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x29a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x2a2 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09dd, lo: 0xa9, hi: 0xa9}, + {value: 0x09fd, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2ad + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2b0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, 
lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b4 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2ba + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2be + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2c2 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0xbf}, + // Block 0x53, offset 0x2c6 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ebd, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2cc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x56, offset 0x2db + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x58, offset 0x2f0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xbf}, + // Block 0x5a, offset 0x2f7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x2fd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0f15, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0020, lo: 0x01}, + {value: 0x0f35, lo: 0x80, hi: 0xbf}, + // Block 0x5d, offset 0x303 + {value: 0x0020, lo: 0x02}, + {value: 0x1735, lo: 0x80, hi: 
0x8f}, + {value: 0x1915, lo: 0x90, hi: 0xbf}, + // Block 0x5e, offset 0x306 + {value: 0x0020, lo: 0x01}, + {value: 0x1f15, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x308 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x60, offset 0x30b + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x61, offset 0x315 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x318 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a35, lo: 0xb1, hi: 0xb1}, + {value: 0x2a55, lo: 0xb2, hi: 0xb2}, + {value: 0x2a75, lo: 0xb3, hi: 0xb3}, + {value: 0x2a95, lo: 0xb4, hi: 0xb4}, + {value: 0x2a75, lo: 0xb5, hi: 0xb5}, + {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, + {value: 0x2ad5, lo: 0xb7, hi: 0xb7}, + {value: 0x2af5, lo: 0xb8, hi: 0xb9}, + {value: 0x2b15, lo: 0xba, hi: 0xbb}, + {value: 0x2b35, lo: 0xbc, hi: 0xbd}, + {value: 0x2b15, lo: 0xbe, hi: 0xbf}, + // Block 0x63, offset 0x327 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x65, offset 0x330 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x333 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x33c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x69, offset 0x341 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6a, offset 0x347 + {value: 0x0000, lo: 0x10}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0xe00d, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x83}, + {value: 0x03f5, lo: 0x84, hi: 0x84}, + {value: 0x1329, lo: 0x85, hi: 0x85}, + {value: 0x447d, lo: 0x86, hi: 0x86}, + {value: 0xe07d, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0xe01d, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xb4}, + {value: 0xe01d, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6ec1, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6b, offset 0x358 + {value: 0x0000, 
lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x3b08, lo: 0xac, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x368 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x36e + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6e, offset 0x372 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x381 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x70, offset 0x386 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x71, offset 0x38e + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x398 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x3a3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 
0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x75, offset 0x3bc + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3c5 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3d5 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x78, offset 0x3e2 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x449d, lo: 0x9c, hi: 0x9c}, + {value: 0x44b5, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa8}, + {value: 0x6ed9, lo: 0xa9, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x44cd, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3ee + {value: 0x0000, lo: 0x04}, + {value: 0x44ed, lo: 0x80, hi: 0x8f}, + {value: 0x450d, lo: 0x90, hi: 0x9f}, + {value: 0x452d, lo: 0xa0, hi: 0xaf}, + {value: 0x450d, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3f3 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7b, offset 0x400 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x404 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 
0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7d, offset 0x409 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x7e, offset 0x40b + {value: 0x0020, lo: 0x01}, + {value: 0x454d, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x40d + {value: 0x0020, lo: 0x03}, + {value: 0x4d4d, lo: 0x80, hi: 0x94}, + {value: 0x4b0d, lo: 0x95, hi: 0x95}, + {value: 0x4fed, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x552d, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x5d2d, lo: 0x80, hi: 0x84}, + {value: 0x568d, lo: 0x85, hi: 0x85}, + {value: 0x5dcd, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x08}, + {value: 0x6b8d, lo: 0x80, hi: 0x8f}, + {value: 0x6d4d, lo: 0x90, hi: 0x90}, + {value: 0x6d8d, lo: 0x91, hi: 0xab}, + {value: 0x6ef1, lo: 0xac, hi: 0xac}, + {value: 0x70ed, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x710d, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x420 + {value: 0x0020, lo: 0x05}, + {value: 0x730d, lo: 0x80, hi: 0xad}, + {value: 0x656d, lo: 0xae, hi: 0xae}, + {value: 0x78cd, lo: 0xaf, hi: 0xb5}, + {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, + {value: 0x79ad, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x426 + {value: 0x0028, lo: 0x03}, + {value: 0x7c71, lo: 0x80, hi: 0x82}, + {value: 0x7c31, lo: 0x83, hi: 0x83}, + {value: 0x7ce9, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x42a + {value: 0x0038, lo: 0x0f}, + {value: 0x9e01, lo: 0x80, hi: 0x83}, + {value: 0x9ea9, lo: 0x84, hi: 0x85}, + {value: 0x9ee1, lo: 0x86, hi: 0x87}, + {value: 0x9f19, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa0d9, lo: 0x92, hi: 0x97}, + {value: 0xa1f1, lo: 0x98, hi: 0x9c}, + {value: 0xa2d1, lo: 0x9d, hi: 0xb3}, + {value: 0x9d91, lo: 0xb4, hi: 0xb4}, + {value: 0x9e01, lo: 0xb5, hi: 0xb5}, + {value: 0xa7d9, lo: 0xb6, hi: 0xbb}, + {value: 0xa8b9, lo: 0xbc, hi: 0xbc}, + {value: 0xa849, lo: 0xbd, hi: 0xbd}, + {value: 0xa929, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x43a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x444 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x449 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, offset 0x44c + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x452 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x459 + {value: 0x0000, 
lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x45e + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x462 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x468 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x46d + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x476 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x47b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x481 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8b0d, lo: 0x98, hi: 0x9f}, + {value: 0x8b25, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x488 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8b25, lo: 0xb0, hi: 0xb7}, + {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x48f + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x496 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x49a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x49f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x4a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x4a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 
0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4b3 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4be + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4c5 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4cd + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4d6 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4e6 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4ed + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4f1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4f5 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4fc + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4fe + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x501 + {value: 
0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x508 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xaa, offset 0x511 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xab, offset 0x515 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xac}, + {value: 0x0818, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xac, offset 0x51d + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xad, offset 0x525 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xae, offset 0x52d + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb7}, + {value: 0x0a08, lo: 0xb8, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xba}, + {value: 0x0a08, lo: 0xbb, hi: 0xbc}, + {value: 0x0c08, lo: 0xbd, hi: 0xbd}, + {value: 0x0a08, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x539 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x81}, + {value: 0x0c08, lo: 0x82, hi: 0x83}, + {value: 0x0a08, lo: 0x84, hi: 0x84}, + {value: 0x0818, lo: 0x85, hi: 0x88}, + {value: 0x0c18, lo: 0x89, hi: 0x89}, + {value: 0x0a18, lo: 0x8a, hi: 0x8a}, + {value: 0x0918, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x545 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb1, offset 0x54b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xb2, offset 0x554 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, 
+ {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb3, offset 0x560 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb4, offset 0x567 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb5, offset 0x570 + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb6, offset 0x57b + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x582 + {value: 0x0000, lo: 0x0e}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x3008, lo: 0x8e, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb8, offset 0x591 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x59e + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xba, offset 0x5ab + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + 
{value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xbb, offset 0x5b4 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xbc, offset 0x5b8 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xbd, offset 0x5c7 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5cf + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbf, offset 0x5da + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc0, offset 0x5e3 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xc1, offset 0x5e9 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5f1 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc3, offset 0x5fa + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xb8}, + {value: 0x0040, lo: 
0xb9, hi: 0xbf}, + // Block 0xc4, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc5, offset 0x608 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc6, offset 0x614 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc7, offset 0x61d + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc8, offset 0x620 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x625 + {value: 0x0000, lo: 0x08}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xca, offset 0x62e + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcb, offset 0x633 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x9a, hi: 0x9b}, + {value: 0x3008, lo: 0x9c, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xbf}, + // Block 0xcc, offset 0x640 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x64b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xce, offset 0x654 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + 
{value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xcf, offset 0x65e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd0, offset 0x661 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x66b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xd2, offset 0x674 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x680 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xd4, offset 0x68d + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xd5, offset 0x69a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd6, offset 0x6a8 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd7, offset 0x6af + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 
0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0xd8, offset 0x6b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xd9, offset 0x6b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xda, offset 0x6ba + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xdb, offset 0x6bf + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xdc, offset 0x6c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0340, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xdd, offset 0x6c7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xde, offset 0x6ca + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xdf, offset 0x6d1 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x6d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xe1, offset 0x6dc + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xe2, offset 0x6e7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xe3, offset 0x6ea + {value: 0x0000, lo: 0x02}, + {value: 0xe105, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xe4, offset 0x6ed + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xe5, offset 0x6f0 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbf}, + // Block 0xe6, offset 0x6f6 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6fc + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xaf}, + {value: 0x3008, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x705 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xe9, offset 0x708 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xea, offset 0x70b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xeb, offset 0x70e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xec, offset 0x711 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xed, offset 0x718 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xee, offset 0x71b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xef, offset 0x720 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xf0, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xf2, offset 0x731 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb609, lo: 0x9e, hi: 0x9e}, + {value: 0xb651, lo: 0x9f, hi: 0x9f}, + {value: 0xb699, lo: 0xa0, hi: 0xa0}, + {value: 0xb701, lo: 0xa1, hi: 0xa1}, + {value: 0xb769, lo: 0xa2, hi: 0xa2}, + {value: 0xb7d1, lo: 0xa3, hi: 0xa3}, + {value: 0xb839, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xf3, offset 0x740 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb8a1, lo: 0xbb, hi: 0xbb}, + {value: 0xb8e9, lo: 0xbc, hi: 0xbc}, + {value: 0xb931, lo: 0xbd, hi: 0xbd}, + {value: 0xb999, lo: 0xbe, hi: 0xbe}, + {value: 0xba01, lo: 0xbf, hi: 0xbf}, + // Block 0xf4, offset 0x74c + {value: 0x0000, lo: 0x03}, + {value: 0xba69, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xf5, offset 0x750 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xf6, offset 0x755 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 
0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf7, offset 0x759 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf8, offset 0x75e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf9, offset 0x762 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xfa, offset 0x767 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xfb, offset 0x770 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xfc, offset 0x77b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xfd, offset 0x781 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xfe, offset 0x787 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xff, offset 0x78d + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x100, offset 0x793 + {value: 0x0000, lo: 0x08}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0b08, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x101, offset 0x79c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0x102, offset 0x79f + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x103, offset 0x7a2 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0818, lo: 0x81, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x104, offset 0x7a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x105, offset 
0x7aa + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x106, offset 0x7ae + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x107, offset 0x7b4 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x108, offset 0x7ba + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc229, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x7bf + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0x10a, offset 0x7c2 + {value: 0x0000, lo: 0x0f}, + {value: 0xc851, lo: 0x80, hi: 0x80}, + {value: 0xc8a1, lo: 0x81, hi: 0x81}, + {value: 0xc8f1, lo: 0x82, hi: 0x82}, + {value: 0xc941, lo: 0x83, hi: 0x83}, + {value: 0xc991, lo: 0x84, hi: 0x84}, + {value: 0xc9e1, lo: 0x85, hi: 0x85}, + {value: 0xca31, lo: 0x86, hi: 0x86}, + {value: 0xca81, lo: 0x87, hi: 0x87}, + {value: 0xcad1, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcb21, lo: 0x90, hi: 0x90}, + {value: 0xcb41, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0x10b, offset 0x7d2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x10c, offset 0x7d9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x10d, offset 0x7dc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x10e, offset 0x7e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x10f, offset 0x7e5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x110, offset 0x7eb + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x111, offset 0x7f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0x112, offset 0x7f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x113, offset 0x7fa + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, 
hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x114, offset 0x803 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x115, offset 0x80a + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x116, offset 0x80f + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0xbf}, + // Block 0x117, offset 0x813 + {value: 0x0000, lo: 0x0d}, + {value: 0x0018, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xaf}, + {value: 0x1f41, lo: 0xb0, hi: 0xb0}, + {value: 0x00c9, lo: 0xb1, hi: 0xb1}, + {value: 0x0069, lo: 0xb2, hi: 0xb2}, + {value: 0x0079, lo: 0xb3, hi: 0xb3}, + {value: 0x1f51, lo: 0xb4, hi: 0xb4}, + {value: 0x1f61, lo: 0xb5, hi: 0xb5}, + {value: 0x1f71, lo: 0xb6, hi: 0xb6}, + {value: 0x1f81, lo: 0xb7, hi: 0xb7}, + {value: 0x1f91, lo: 0xb8, hi: 0xb8}, + {value: 0x1fa1, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x118, offset 0x821 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x119, offset 0x824 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x11a, offset 0x827 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x11b, offset 0x82b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x11c, offset 0x82f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x11d, offset 0x832 + {value: 0x0020, lo: 0x0f}, + {value: 0xdf21, lo: 0x80, hi: 0x89}, + {value: 0x8e35, lo: 0x8a, hi: 0x8a}, + {value: 0xe061, lo: 0x8b, hi: 0x9c}, + {value: 0x8e55, lo: 0x9d, hi: 0x9d}, + {value: 0xe2a1, lo: 0x9e, hi: 0xa2}, + {value: 0x8e75, lo: 0xa3, hi: 0xa3}, + {value: 0xe341, lo: 0xa4, hi: 0xab}, + {value: 0x7f0d, lo: 0xac, hi: 0xac}, + {value: 0xe441, lo: 0xad, hi: 0xaf}, + {value: 0x8e95, lo: 0xb0, hi: 0xb0}, + {value: 0xe4a1, lo: 0xb1, hi: 0xb6}, + {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, + {value: 0xe561, lo: 0xba, hi: 0xba}, + {value: 0x8f15, lo: 0xbb, hi: 0xbb}, + {value: 0xe581, lo: 0xbc, hi: 0xbf}, + // Block 0x11e, offset 0x842 + {value: 0x0020, lo: 0x10}, + {value: 0x93b5, lo: 0x80, hi: 0x80}, + {value: 0xf101, lo: 0x81, hi: 0x86}, + {value: 0x93d5, lo: 0x87, hi: 0x8a}, + {value: 0xda61, lo: 0x8b, hi: 0x8b}, + {value: 0xf1c1, lo: 0x8c, hi: 0x96}, + {value: 0x9455, lo: 0x97, hi: 0x97}, + {value: 0xf321, lo: 0x98, hi: 0xa3}, + {value: 0x9475, lo: 0xa4, hi: 0xa6}, + {value: 0xf4a1, lo: 0xa7, hi: 0xaa}, + {value: 0x94d5, lo: 0xab, hi: 0xab}, + {value: 0xf521, lo: 0xac, hi: 0xac}, + {value: 0x94f5, lo: 0xad, hi: 0xad}, + {value: 0xf541, lo: 0xae, hi: 0xaf}, + {value: 0x9515, lo: 0xb0, hi: 0xb1}, + {value: 0xf581, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 
0xbf}, + // Block 0x11f, offset 0x853 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xbf}, + // Block 0x120, offset 0x856 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x121, offset 0x85b + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x122, offset 0x85d + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x123, offset 0x85f + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 43370 bytes (42KiB); checksum: EBD909C0 diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e693..8cfd6063e 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -16,15 +16,16 @@ Or you can manually git clone the repository to See godoc for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) ## Policy for new packages -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +package. ## Report Issues / Send Patches diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..06f84b855 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..dcbb14ef3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. 
+type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000..f77701fe8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,287 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. 
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. 
+var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. +var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. 
+// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 000000000..464a209cf --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..301b752e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..87dd5e302 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd": + doinit() + default: + // Most platforms don't seem to allow reading these registers. + // + // OpenBSD: + // See https://golang.org/issue/31746 + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 
1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..a54436e39 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go similarity index 57% rename from vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go rename to vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index b5130a6dd..7b88e865a 100644 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Code generated by generate-protos. DO NOT EDIT. +// +build !gccgo -package fieldnum +package cpu -// Field numbers for google.protobuf.Empty. -const () +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..568bcd031 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..f7cb46971 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
+func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go similarity index 54% rename from vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go rename to vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 0e3f25aca..53ca8d65c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -2,10 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,arm,!go1.12 +// +build gccgo -package unix +package cpu -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 000000000..aa986f778 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 000000000..e363c7d13 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +// +// TODO: Replace with a better alternative: +// +// #include +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8. 
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 000000000..ba49b91bd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..6fc874f7f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !386,!amd64,!amd64p32,!arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..79a38a0b9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,71 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + if err := readHWCAP(); err != nil { + // failed to read /proc/self/auxv, try reading registers directly + readARM64Registers() + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..5a4189005 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go similarity index 53% rename from vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go rename to vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index 6b223f91a..42b5d33cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin,386,!go1.12 +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x -package unix +package cpu -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..99f8a6399 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..57b5b677d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..cfc1946b7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 000000000..b412efc1b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go similarity index 53% rename from vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go rename to vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index 01d450406..16c1c4090 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,10 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin,arm64,!go1.12 +// +build !linux,!netbsd +// +build arm64 -package unix +package cpu -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 000000000..f49fad677 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..d28d675b5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..8b08de341 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. 
+type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..e5037d92e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..5382f2a22 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. 
+ +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..48d429331 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,135 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + // Check if OPMASK and ZMM registers have OS support. 
+ osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..76fbe40b7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Morever, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. 
+ +// +build aix +// +build gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..78fe25e86 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +// +build aix,ppc64 +// +build !gccgo + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s new file mode 100644 index 000000000..567a4763c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for mips64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go index 5868a4a47..a9911c7c1 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_darwin.go +++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go @@ -16,3 +16,9 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk)))) return err } + +// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command. +func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error { + _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore)))) + return err +} diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index fc0e50e03..8db48e5e0 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -1,9 +1,9 @@ -// +build linux,386 linux,arm linux,mips linux,mipsle - // Copyright 2014 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build linux,386 linux,arm linux,mips linux,mipsle + package unix func init() { diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index cd6f5a613..86032c11e 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -12,10 +12,8 @@ import "syscall" // We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. -//extern gccgoRealSyscallNoError func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr) -//extern gccgoRealSyscall func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index c44730c5e..2cb1fefac 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -21,6 +21,9 @@ struct ret { uintptr_t err; }; +struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) + __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall"); + struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) { @@ -32,6 +35,9 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp return r; } +uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) + __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError"); + uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) { diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 3559e5dcb..564167861 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -20,6 +20,15 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req uint, value int) error { + v := int32(value) + return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) +} + // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. 
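
Review note, not part of the vendored diff: the ioctl.go hunk above moves IoctlSetPointerInt into the shared Unix ioctl.go (the matching removal from syscall_linux.go appears later in this patch). A minimal usage sketch follows; it assumes a Linux watchdog device at the hypothetical path /dev/watchdog and uses WDIOC_SETTIMEOUT, one of the ioctl requests whose integer argument the kernel reads through a pointer rather than by value.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical device path; requires a Linux watchdog driver.
	fd, err := unix.Open("/dev/watchdog", unix.O_RDWR, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, "open:", err)
		return
	}
	defer unix.Close(fd)

	// WDIOC_SETTIMEOUT expects a pointer to its integer argument, so
	// IoctlSetInt (which passes the value directly) does not fit here.
	// IoctlSetPointerInt copies the value to an int32 and passes its address.
	if err := unix.IoctlSetPointerInt(fd, unix.WDIOC_SETTIMEOUT, 30); err != nil {
		fmt.Fprintln(os.Stderr, "WDIOC_SETTIMEOUT:", err)
	}
}
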
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ece31e9dc..d257fac50 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,26 +73,22 @@ aix_ppc64) darwin_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; @@ -184,6 +180,15 @@ openbsd_arm64) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; +openbsd_mips64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd" + mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; solaris_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors="$mkerrors -m64" @@ -217,8 +222,6 @@ esac # aix/ppc64 script generates files instead of writing to stdin. 
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; elif [ "$GOOS" == "darwin" ]; then - # pre-1.12, direct syscalls - echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; # 1.12 and later, syscalls via libSystem echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; # 1.13 and later, syscalls via libSystem (including syscallPtr) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 780e387e3..0c9a5c44b 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,12 +58,15 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #include #include +#include +#include #include #include #include #include #include #include +#include #include #include #include @@ -93,6 +96,7 @@ includes_DragonFly=' #include #include #include +#include #include #include #include @@ -107,6 +111,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -192,9 +197,12 @@ struct ltchars { #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -225,6 +233,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -297,6 +306,7 @@ includes_NetBSD=' #include #include #include +#include #include #include #include @@ -325,6 +335,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -365,6 +376,7 @@ includes_SunOS=' #include #include #include +#include #include #include #include @@ -489,6 +501,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || $2 ~ /^TCGET/ || @@ -507,16 +520,21 @@ ccflags="$@" $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || + $2 ~ /^CP_/ || + $2 ~ /^CPUSTATES$/ || + $2 ~ /^CTLIOCGINFO$/ || $2 ~ /^ALG_/ || + $2 ~ /^FI(CLONE|DEDUPERANGE)/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || - $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || $2 ~ /^FS_VERITY_/ || $2 ~ /^FSCRYPT_/ || + $2 ~ /^DM_/ || $2 ~ /^GRND_/ || $2 ~ /^RND/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || - $2 ~ /^PERF_EVENT_IOC_/ || + $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || @@ -535,7 +553,7 @@ ccflags="$@" $2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ || $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || $2 ~ /^FSOPT_/ || - $2 ~ /^WDIOC_/ || + $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 7d08dae5b..57a0021da 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -20,7 +20,7 @@ func cmsgAlignOf(salen int) int { case "aix": // There is no alignment on AIX. 
salign = 1 - case "darwin", "illumos", "solaris": + case "darwin", "ios", "illumos", "solaris": // NOTE: It seems like 64-bit Darwin, Illumos and Solaris // kernels still require 32-bit aligned access to network // subsystem. @@ -32,6 +32,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOARCH == "arm" { salign = 8 } + // NetBSD aarch64 requires 128-bit alignment. + if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { + salign = 16 + } } return (salen + salign - 1) & ^(salign - 1) diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index fd4ee8ebe..ab75ef9cc 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -24,7 +24,13 @@ // holds a value of type syscall.Errno. package unix // import "golang.org/x/sys/unix" -import "strings" +import ( + "bytes" + "strings" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any @@ -49,5 +55,40 @@ func BytePtrFromString(s string) (*byte, error) { return &a[0], nil } +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. +func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + var s []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(s) +} + // Single-word zero for use when we need a valid pointer to 0 bytes. 
var _zero uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9ad8a0d4a..440815382 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -19,6 +19,22 @@ import "unsafe" * Wrapped */ +func Access(path string, mode uint32) (err error) { + return Faccessat(AT_FDCWD, path, mode, 0) +} + +func Chmod(path string, mode uint32) (err error) { + return Fchmodat(AT_FDCWD, path, mode, 0) +} + +func Chown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, 0) +} + +func Creat(path string, mode uint32) (fd int, err error) { + return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) +} + //sys utimes(path string, times *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 68605db62..bc634a280 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -18,6 +18,21 @@ import ( "unsafe" ) +const ImplementsGetwd = true + +func Getwd() (string, error) { + var buf [PathMax]byte + _, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + /* * Wrapped */ @@ -262,7 +277,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil } - return nil, EAFNOSUPPORT + return anyToSockaddrGOOS(fd, rsa) } func Accept(fd int) (nfd int, sa Sockaddr, err error) { @@ -272,7 +287,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - if runtime.GOOS == "darwin" && len == 0 { + if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && len == 0 { // Accepted socket has no address. // This is likely due to a bug in xnu kernels, // where instead of ECONNABORTED error socket @@ -527,6 +542,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) { return &ci, nil } +func SysctlTimeval(name string) (*Timeval, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + var tv Timeval + n := uintptr(unsafe.Sizeof(tv)) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil { + return nil, err + } + if n != unsafe.Sizeof(tv) { + return nil, EIO + } + return &tv, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go index 6a15cba61..b31ef0358 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -10,6 +10,8 @@ import ( "unsafe" ) +const _SYS_GETDIRENTRIES64 = 344 + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // To implement this using libSystem we'd need syscall_syscallPtr for // fdopendir. 
However, syscallPtr was only added in Go 1.13, so we fall @@ -20,7 +22,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { return n, errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 0cf31acf0..b62573890 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,29 +13,11 @@ package unix import ( - "errors" + "runtime" "syscall" "unsafe" ) -const ImplementsGetwd = true - -func Getwd() (string, error) { - buf := make([]byte, 2048) - attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0) - if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 { - wd := string(attrs[0]) - // Sanity check that it's an absolute path and ends - // in a null byte, which we then strip. - if wd[0] == '/' && wd[len(wd)-1] == 0 { - return wd[:len(wd)-1], nil - } - } - // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the - // slow algorithm. - return "", ENOTSUP -} - // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -49,6 +31,41 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +// SockaddrCtl implements the Sockaddr interface for AF_SYSTEM type sockets. +type SockaddrCtl struct { + ID uint32 + Unit uint32 + raw RawSockaddrCtl +} + +func (sa *SockaddrCtl) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sc_len = SizeofSockaddrCtl + sa.raw.Sc_family = AF_SYSTEM + sa.raw.Ss_sysaddr = AF_SYS_CONTROL + sa.raw.Sc_id = sa.ID + sa.raw.Sc_unit = sa.Unit + return unsafe.Pointer(&sa.raw), SizeofSockaddrCtl, nil +} + +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_SYSTEM: + pp := (*RawSockaddrCtl)(unsafe.Pointer(rsa)) + if pp.Ss_sysaddr == AF_SYS_CONTROL { + sa := new(SockaddrCtl) + sa.ID = pp.Sc_id + sa.Unit = pp.Sc_unit + return sa, nil + } + } + return nil, EAFNOSUPPORT +} + +// Some external packages rely on SYS___SYSCTL being defined to implement their +// own sysctl wrappers. Provide it here, even though direct syscalls are no +// longer supported on darwin. +const SYS___SYSCTL = SYS_SYSCTL + // Translate "kern.hostname" to []_C_int{0,1,2,3}. 
func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) @@ -92,11 +109,6 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } -const ( - attrBitMapCount = 5 - attrCmnFullpath = 0x08000000 -) - type attrList struct { bitmapCount uint16 _ uint16 @@ -107,54 +119,6 @@ type attrList struct { Forkattr uint32 } -func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { - if len(attrBuf) < 4 { - return nil, errors.New("attrBuf too small") - } - attrList.bitmapCount = attrBitMapCount - - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return nil, err - } - - if err := getattrlist(_p0, unsafe.Pointer(&attrList), unsafe.Pointer(&attrBuf[0]), uintptr(len(attrBuf)), int(options)); err != nil { - return nil, err - } - size := *(*uint32)(unsafe.Pointer(&attrBuf[0])) - - // dat is the section of attrBuf that contains valid data, - // without the 4 byte length header. All attribute offsets - // are relative to dat. - dat := attrBuf - if int(size) < len(attrBuf) { - dat = dat[:size] - } - dat = dat[4:] // remove length prefix - - for i := uint32(0); int(i) < len(dat); { - header := dat[i:] - if len(header) < 8 { - return attrs, errors.New("truncated attribute header") - } - datOff := *(*int32)(unsafe.Pointer(&header[0])) - attrLen := *(*uint32)(unsafe.Pointer(&header[4])) - if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { - return attrs, errors.New("truncated results; attrBuf too small") - } - end := uint32(datOff) + attrLen - attrs = append(attrs, dat[datOff:end]) - i = end - if r := i % 4; r != 0 { - i += (4 - r) - } - } - return -} - -//sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -324,6 +288,35 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) +func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { + err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) + runtime.KeepAlive(ctlInfo) + return err +} + +// IfreqMTU is struct ifreq used to get or set a network device's MTU. +type IfreqMTU struct { + Name [IFNAMSIZ]byte + MTU int32 +} + +// IoctlGetIfreqMTU performs the SIOCGIFMTU ioctl operation on fd to get the MTU +// of the network device specified by ifname. +func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { + var ifreq IfreqMTU + copy(ifreq.Name[:], ifname) + err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + return &ifreq, err +} + +// IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU +// of the network device specified by ifreq.Name. 
+func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { + err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) + runtime.KeepAlive(ifreq) + return err +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -396,6 +389,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chroot(path string) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) +//sys Clonefile(src string, dst string, flags int) (err error) +//sys Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exchangedata(path1 string, path2 string, options int) (err error) @@ -407,10 +402,12 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) +//sys Getcwd(buf []byte) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 2724e3a51..6c1f4ab95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} @@ -44,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/386 the syscall is called sysctl instead of __sysctl. -const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go deleted file mode 100644 index 68ebd6fab..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin,amd64,!go1.12 - -package unix - -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index ce2e0d249..0582ae256 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -44,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/amd64 the syscall is called sysctl instead of __sysctl. -const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index fc17a3f23..c6a9733b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -4,9 +4,7 @@ package unix -import ( - "syscall" -) +import "syscall" func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP @@ -44,10 +42,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/arm the syscall is called sysctl instead of __sysctl. -const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 1e91ddf32..253afa4de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -6,13 +6,7 @@ package unix -import ( - "syscall" -) - -func ptrace(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -46,14 +40,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic -// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions -// of darwin/arm64 the syscall is called sysctl instead of __sysctl. 
-const SYS___SYSCTL = SYS_SYSCTL - //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 8a195ae58..842ab5acd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -47,6 +47,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) @@ -129,23 +133,8 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 6932e7c2c..acc00c2e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -54,6 +54,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + // Translate "kern.hostname" to []_C_int{0,1,2,3}. 
func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) @@ -140,23 +144,8 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( _p0 unsafe.Pointer diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 99e62dcd8..bbc4f3ea5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -24,7 +24,7 @@ func bytes2iovec(bs [][]byte) []Iovec { return iovecs } -//sys readv(fd int, iovs []Iovec) (n int, err error) +//sys readv(fd int, iovs []Iovec) (n int, err error) func Readv(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -32,7 +32,7 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { return n, err } -//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) +//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -40,7 +40,7 @@ func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { return n, err } -//sys writev(fd int, iovs []Iovec) (n int, err error) +//sys writev(fd int, iovs []Iovec) (n int, err error) func Writev(fd int, iovs [][]byte) (n int, err error) { iovecs := bytes2iovec(iovs) @@ -48,10 +48,43 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { return n, err } -//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) +//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) { iovecs := bytes2iovec(iovs) n, err = pwritev(fd, iovecs, off) return n, err } + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = libsocket.accept4 + +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + sa, err = anyToSockaddr(fd, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index e50e4cb27..84a9e5277 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -82,15 +82,6 @@ func IoctlRetInt(fd int, req uint) (int, error) { return int(ret), nil } -// IoctlSetPointerInt performs an ioctl operation which sets an -// integer value on fd, using the specified request number. The ioctl -// argument is called with a pointer to the integer value, rather than -// passing the integer value directly. 
-func IoctlSetPointerInt(fd int, req uint, value int) error { - v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) @@ -115,12 +106,53 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) { return &value, err } +// IoctlGetWatchdogInfo fetches information about a watchdog device from the +// Linux watchdog API. For more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { + var value WatchdogInfo + err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + return &value, err +} + func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) return &value, err } +// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the +// range of data conveyed in value to the file associated with the file +// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. +func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { + err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file +// associated with the file description srcFd to the file associated with the +// file descriptor destFd. See the ioctl_ficlone(2) man page for details. +func IoctlFileClone(destFd, srcFd int) error { + return ioctl(destFd, FICLONE, uintptr(srcFd)) +} + +// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the +// range of data conveyed in value with the file associated with the file +// descriptor destFd. See the ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { + err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For +// more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlWatchdogKeepalive(fd int) error { + return ioctl(fd, WDIOC_KEEPALIVE, 0) +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -145,6 +177,12 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) return openat(dirfd, path, flags|O_LARGEFILE, mode) } +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + return openat2(dirfd, path, how, SizeofOpenHow) +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -885,6 +923,35 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } +// SockaddrIUCV implements the Sockaddr interface for AF_IUCV sockets. +type SockaddrIUCV struct { + UserID string + Name string + raw RawSockaddrIUCV +} + +func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_IUCV + // These are EBCDIC encoded by the kernel, but we still need to pad them + // with blanks. 
Initializing with blanks allows the caller to feed in either + // a padded or an unpadded string. + for i := 0; i < 8; i++ { + sa.raw.Nodeid[i] = ' ' + sa.raw.User_id[i] = ' ' + sa.raw.Name[i] = ' ' + } + if len(sa.UserID) > 8 || len(sa.Name) > 8 { + return nil, 0, EINVAL + } + for i, b := range []byte(sa.UserID[:]) { + sa.raw.User_id[i] = int8(b) + } + for i, b := range []byte(sa.Name[:]) { + sa.raw.Name[i] = int8(b) + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -1065,6 +1132,38 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil + case AF_IUCV: + pp := (*RawSockaddrIUCV)(unsafe.Pointer(rsa)) + + var user [8]byte + var name [8]byte + + for i := 0; i < 8; i++ { + user[i] = byte(pp.User_id[i]) + name[i] = byte(pp.Name[i]) + } + + sa := &SockaddrIUCV{ + UserID: string(user[:]), + Name: string(name[:]), + } + return sa, nil + + case AF_CAN: + pp := (*RawSockaddrCAN)(unsafe.Pointer(rsa)) + sa := &SockaddrCAN{ + Ifindex: int(pp.Ifindex), + } + rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) + for i := 0; i < 4; i++ { + rx[i] = pp.Addr[i] + } + tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) + for i := 0; i < 4; i++ { + tx[i] = pp.Addr[i+4] + } + return sa, nil + } return nil, EAFNOSUPPORT } @@ -1965,10 +2064,15 @@ func isGroupMember(gid int) bool { } //sys faccessat(dirfd int, path string, mode uint32) (err error) +//sys Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { - return EINVAL + if flags == 0 { + return faccessat(dirfd, path, mode) + } + + if err := Faccessat2(dirfd, path, mode, flags); err != ENOSYS && err != EPERM { + return err } // The Linux kernel faccessat system call does not take any flags. @@ -1977,8 +2081,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // Because people naturally expect syscall.Faccessat to act // like C faccessat, we do the same. - if flags == 0 { - return faccessat(dirfd, path, mode) + if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { + return EINVAL } var st Stat_t @@ -2122,6 +2226,18 @@ func Klogset(typ int, arg int) (err error) { return nil } +// RemoteIovec is Iovec with the pointer replaced with an integer. +// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer +// refers to a location in a different process' address space, which +// would confuse the Go garbage collector. +type RemoteIovec struct { + Base uintptr + Len int +} + +//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV +//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 048d18e3c..c97c2ee53 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP) -// so that go vet can check that they are correct. 
- // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index e1913e2c9..496837b1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -7,7 +7,6 @@ package unix import ( - "syscall" "unsafe" ) @@ -49,10 +48,6 @@ func Pipe2(p []int, flags int) (err error) { return } -// Underlying system call writes to newoffset via pointer. -// Implemented in assembly to avoid allocation. -func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go new file mode 100644 index 000000000..8c514c95e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,linux + +package unix + +import "syscall" + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 45b50a610..1e6843b4c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -31,6 +31,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) { @@ -141,23 +145,8 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} - // TODO func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { return -1, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index a266e92a9..6a50b50bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -31,6 +31,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func nametomib(name string) (mib []_C_int, err error) { @@ -114,23 +118,8 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return } -const ImplementsGetwd = true - //sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD -func Getwd() (string, error) { - var buf [PathMax]byte - _, err := Getcwd(buf[0:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), 
nil -} - func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go new file mode 100644 index 000000000..30f285343 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of OpenBSD the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0e2a696ad..fee6e9952 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,6 +13,7 @@ package unix import ( + "runtime" "syscall" "unsafe" ) @@ -553,8 +554,10 @@ func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +func IoctlSetTermio(fd int, req uint, value *Termio) error { + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err } func IoctlGetTermio(fd int, req uint) (*Termio, error) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 6217cdba5..ec376f51b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -232,6 +233,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e3ff2ee3d..fea5dfaad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -232,6 +233,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 
CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 3e417571a..03feefbf8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -232,6 +233,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index cbd8ed18b..b40fb1f69 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -232,6 +233,8 @@ const ( CLOCK_THREAD_CPUTIME_ID = 0x10 CLOCK_UPTIME_RAW = 0x8 CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -249,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 613047174..f5e91b7ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -62,6 +62,7 @@ const ( B28800 = 0x7080 B300 = 0x12c B38400 = 0x9600 + B460800 = 0x70800 B4800 = 0x12c0 B50 = 0x32 B57600 = 0xe100 @@ -69,12 +70,15 @@ const ( B7200 = 0x1c20 B75 = 0x4b B76800 = 0x12c00 + B921600 = 0xe1000 B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427d BIOCFLUSH = 0x20004268 BIOCGBLEN = 0x40044266 BIOCGDLT = 0x4004426a BIOCGDLTLIST = 0xc0104279 BIOCGETIF = 0x4020426b + BIOCGFEEDBACK = 0x4004427c BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044272 BIOCGRTIMEOUT = 0x4010426e @@ -88,6 +92,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETIF = 0x8020426c BIOCSETWF = 0x8010427b + BIOCSFEEDBACK = 0x8004427d BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044273 BIOCSRTIMEOUT = 0x8010426d @@ -125,6 +130,7 @@ const ( BPF_MINBUFSIZE = 0x20 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 + BPF_MOD = 0x90 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 @@ -139,6 +145,7 @@ const ( BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XOR = 0xa0 BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 @@ -156,6 +163,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 @@ -175,6 +188,7 @@ const ( DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde DLT_APPLE_IP_OVER_IEEE1394 = 0x8a DLT_ARCNET = 0x7 DLT_ARCNET_LINUX = 0x81 @@ -184,22 +198,33 @@ const ( DLT_AX25 = 0x3 DLT_AX25_KISS = 0xca DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff DLT_BLUETOOTH_HCI_H4 = 0xbb DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 
DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 DLT_EN3MB = 0x2 DLT_ENC = 0x6d + DLT_EPON = 0x103 DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa DLT_FLEXRAY = 0xd2 DLT_FRELAY = 0x6b @@ -209,6 +234,8 @@ const ( DLT_GPF_F = 0xab DLT_GPF_T = 0xaa DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 @@ -218,18 +245,28 @@ const ( DLT_IEEE802_11_RADIO_AVS = 0xa3 DLT_IEEE802_15_4 = 0xc3 DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 DLT_IEEE802_15_4_NONASK_PHY = 0xd7 DLT_IEEE802_16_MAC_CPS = 0xbc DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 DLT_IPMB = 0xc7 DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee DLT_JUNIPER_CHDLC = 0xb5 DLT_JUNIPER_ES = 0x84 DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea DLT_JUNIPER_FRELAY = 0xb4 DLT_JUNIPER_GGSN = 0x85 DLT_JUNIPER_ISM = 0xc2 @@ -242,25 +279,40 @@ const ( DLT_JUNIPER_PPPOE = 0xa7 DLT_JUNIPER_PPPOE_ATM = 0xa8 DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 DLT_JUNIPER_ST = 0xc8 DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 DLT_LAPB_WITH_DIR = 0xcf DLT_LAPD = 0xcb DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 DLT_LINUX_IRDA = 0x90 DLT_LINUX_LAPD = 0xb1 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb DLT_MTP2 = 0x8c DLT_MTP2_WITH_PHDR = 0x8b DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 DLT_NULL = 0x0 DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 + DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 DLT_PPP_BSDOS = 0x10 @@ -269,22 +321,51 @@ const ( DLT_PPP_SERIAL = 0x32 DLT_PPP_WITH_DIR = 0xcc DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DBF = 0xf @@ -323,10 +404,11 @@ const ( EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 + EV_HUP = 0x800 EV_NODATA = 0x1000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTEXIT_LWP = 0x10000 @@ -365,8 +447,9 
@@ const ( IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x118e72 + IFF_CANTCHANGE = 0x318e72 IFF_DEBUG = 0x4 + IFF_IDIRECT = 0x200000 IFF_LINK0 = 0x1000 IFF_LINK1 = 0x2000 IFF_LINK2 = 0x4000 @@ -441,7 +524,6 @@ const ( IFT_EPLRS = 0x57 IFT_ESCON = 0x49 IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 IFT_FAST = 0x7d IFT_FASTETHER = 0x3e IFT_FASTETHERFX = 0x45 @@ -614,6 +696,7 @@ const ( IN_CLASSD_NET = 0xf0000000 IN_CLASSD_NSHIFT = 0x1c IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe IPPROTO_3PC = 0x22 IPPROTO_ADFS = 0x44 IPPROTO_AH = 0x33 @@ -735,7 +818,6 @@ const ( IPV6_DEFHLIM = 0x40 IPV6_DONTFRAG = 0x3e IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FRAGTTL = 0x78 @@ -747,7 +829,6 @@ const ( IPV6_HLIMDEC = 0x1 IPV6_HOPLIMIT = 0x2f IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff @@ -795,16 +876,22 @@ const ( IP_DUMMYNET_DEL = 0x3d IP_DUMMYNET_FLUSH = 0x3e IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 IP_FW_ADD = 0x32 IP_FW_DEL = 0x33 IP_FW_FLUSH = 0x34 IP_FW_GET = 0x36 IP_FW_RESETLOG = 0x37 + IP_FW_TBL_ADD = 0x2a + IP_FW_TBL_CREATE = 0x28 + IP_FW_TBL_DEL = 0x2b + IP_FW_TBL_DESTROY = 0x29 + IP_FW_TBL_EXPIRE = 0x2f + IP_FW_TBL_FLUSH = 0x2c + IP_FW_TBL_GET = 0x2d + IP_FW_TBL_ZERO = 0x2e IP_FW_X = 0x31 IP_FW_ZERO = 0x35 IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1080,12 +1167,10 @@ const ( RTM_MISS = 0x7 RTM_NEWADDR = 0xc RTM_NEWMADDR = 0xf - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x6 + RTM_VERSION = 0x7 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 RTV_IWCAPSEGS = 0x400 @@ -1106,13 +1191,13 @@ const ( SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 - SIOCADDRT = 0x8040720a SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 SIOCALIFADDR = 0x8118691b SIOCATMARK = 0x40047307 SIOCDELMULTI = 0x80206932 - SIOCDELRT = 0x8040720b SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 SIOCDIFPHYADDR = 0x80206949 SIOCDLIFADDR = 0x8118691d SIOCGDRVSPEC = 0xc028697b @@ -1120,6 +1205,7 @@ const ( SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc0406929 SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 @@ -1128,6 +1214,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGENERIC = 0xc020693a SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 SIOCGIFINDEX = 0xc0206920 SIOCGIFMEDIA = 0xc0306938 SIOCGIFMETRIC = 0xc0206917 @@ -1194,6 +1281,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x2000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_SNDBUF = 0x1001 @@ -1233,6 +1321,9 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 TCIFLUSH = 0x1 TCIOFF = 0x3 TCIOFLUSH = 0x3 @@ -1259,6 +1350,8 @@ const ( TCP_NOPUSH = 0x4 TCP_SIGNATURE_ENABLE = 0x10 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1272,7 +1365,6 @@ const ( TIOCGETD = 0x4004741a TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGSIZE = 0x40087468 TIOCGWINSZ = 0x40087468 TIOCISPTMASTER = 0x20007455 TIOCMBIC = 0x8004746b @@ -1317,7 +1409,6 @@ const ( TIOCSETD = 0x8004741b TIOCSIG = 0x2000745f TIOCSPGRP = 0x80047476 - TIOCSSIZE = 0x80087467 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 TIOCSTI = 0x80017472 @@ -1326,6 +1417,8 @@ const ( 
TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VCHECKPT = 0x13 VDISCARD = 0xf VDSUSP = 0xb @@ -1350,9 +1443,12 @@ const ( VWERASE = 0x4 WCONTINUED = 0x4 WCOREFLAG = 0x80 + WEXITED = 0x10 WLINUXCLONE = 0x80000000 WNOHANG = 0x1 - WSTOPPED = 0x7f + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 WUNTRACED = 0x2 ) @@ -1452,11 +1548,6 @@ const ( ETIMEDOUT = syscall.Errno(0x3c) ETOOMANYREFS = syscall.Errno(0x3b) ETXTBSY = syscall.Errno(0x1a) - EUNUSED94 = syscall.Errno(0x5e) - EUNUSED95 = syscall.Errno(0x5f) - EUNUSED96 = syscall.Errno(0x60) - EUNUSED97 = syscall.Errno(0x61) - EUNUSED98 = syscall.Errno(0x62) EUSERS = syscall.Errno(0x44) EWOULDBLOCK = syscall.Errno(0x23) EXDEV = syscall.Errno(0x12) @@ -1600,12 +1691,7 @@ var errorList = [...]struct { {91, "ENOLINK", "link has been severed"}, {92, "EPROTO", "protocol error"}, {93, "ENOMEDIUM", "no medium found"}, - {94, "EUNUSED94", "unknown error: 94"}, - {95, "EUNUSED95", "unknown error: 95"}, - {96, "EUNUSED96", "unknown error: 96"}, - {97, "EUNUSED97", "unknown error: 97"}, - {98, "EUNUSED98", "unknown error: 98"}, - {99, "ELAST", "unknown error: 99"}, + {99, "EASYNC", "unknown error: 99"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 848245873..3689c8084 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4acd101c3..b8f7c3c93 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index e4719873b..be14bb1a4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 5e49769d9..7ce9c0081 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f8bd50c11..2069fb861 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -244,8 +244,66 @@ const 
( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERR_ACK = 0x20 + CAN_ERR_BUSERROR = 0x80 + CAN_ERR_BUSOFF = 0x40 + CAN_ERR_CRTL = 0x4 + CAN_ERR_CRTL_ACTIVE = 0x40 + CAN_ERR_CRTL_RX_OVERFLOW = 0x1 + CAN_ERR_CRTL_RX_PASSIVE = 0x10 + CAN_ERR_CRTL_RX_WARNING = 0x4 + CAN_ERR_CRTL_TX_OVERFLOW = 0x2 + CAN_ERR_CRTL_TX_PASSIVE = 0x20 + CAN_ERR_CRTL_TX_WARNING = 0x8 + CAN_ERR_CRTL_UNSPEC = 0x0 + CAN_ERR_DLC = 0x8 CAN_ERR_FLAG = 0x20000000 + CAN_ERR_LOSTARB = 0x2 + CAN_ERR_LOSTARB_UNSPEC = 0x0 CAN_ERR_MASK = 0x1fffffff + CAN_ERR_PROT = 0x8 + CAN_ERR_PROT_ACTIVE = 0x40 + CAN_ERR_PROT_BIT = 0x1 + CAN_ERR_PROT_BIT0 = 0x8 + CAN_ERR_PROT_BIT1 = 0x10 + CAN_ERR_PROT_FORM = 0x2 + CAN_ERR_PROT_LOC_ACK = 0x19 + CAN_ERR_PROT_LOC_ACK_DEL = 0x1b + CAN_ERR_PROT_LOC_CRC_DEL = 0x18 + CAN_ERR_PROT_LOC_CRC_SEQ = 0x8 + CAN_ERR_PROT_LOC_DATA = 0xa + CAN_ERR_PROT_LOC_DLC = 0xb + CAN_ERR_PROT_LOC_EOF = 0x1a + CAN_ERR_PROT_LOC_ID04_00 = 0xe + CAN_ERR_PROT_LOC_ID12_05 = 0xf + CAN_ERR_PROT_LOC_ID17_13 = 0x7 + CAN_ERR_PROT_LOC_ID20_18 = 0x6 + CAN_ERR_PROT_LOC_ID28_21 = 0x2 + CAN_ERR_PROT_LOC_IDE = 0x5 + CAN_ERR_PROT_LOC_INTERM = 0x12 + CAN_ERR_PROT_LOC_RES0 = 0x9 + CAN_ERR_PROT_LOC_RES1 = 0xd + CAN_ERR_PROT_LOC_RTR = 0xc + CAN_ERR_PROT_LOC_SOF = 0x3 + CAN_ERR_PROT_LOC_SRTR = 0x4 + CAN_ERR_PROT_LOC_UNSPEC = 0x0 + CAN_ERR_PROT_OVERLOAD = 0x20 + CAN_ERR_PROT_STUFF = 0x4 + CAN_ERR_PROT_TX = 0x80 + CAN_ERR_PROT_UNSPEC = 0x0 + CAN_ERR_RESTARTED = 0x100 + CAN_ERR_TRX = 0x10 + CAN_ERR_TRX_CANH_NO_WIRE = 0x4 + CAN_ERR_TRX_CANH_SHORT_TO_BAT = 0x5 + CAN_ERR_TRX_CANH_SHORT_TO_GND = 0x7 + CAN_ERR_TRX_CANH_SHORT_TO_VCC = 0x6 + CAN_ERR_TRX_CANL_NO_WIRE = 0x40 + CAN_ERR_TRX_CANL_SHORT_TO_BAT = 0x50 + CAN_ERR_TRX_CANL_SHORT_TO_CANH = 0x80 + CAN_ERR_TRX_CANL_SHORT_TO_GND = 0x70 + CAN_ERR_TRX_CANL_SHORT_TO_VCC = 0x60 + CAN_ERR_TRX_UNSPEC = 0x0 + CAN_ERR_TX_TIMEOUT = 0x1 CAN_INV_FILTER = 0x20000000 CAN_ISOTP = 0x6 CAN_J1939 = 0x7 @@ -265,6 +323,8 @@ const ( CAP_AUDIT_READ = 0x25 CAP_AUDIT_WRITE = 0x1d CAP_BLOCK_SUSPEND = 0x24 + CAP_BPF = 0x27 + CAP_CHECKPOINT_RESTORE = 0x28 CAP_CHOWN = 0x0 CAP_DAC_OVERRIDE = 0x1 CAP_DAC_READ_SEARCH = 0x2 @@ -273,7 +333,7 @@ const ( CAP_IPC_LOCK = 0xe CAP_IPC_OWNER = 0xf CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 + CAP_LAST_CAP = 0x28 CAP_LEASE = 0x1c CAP_LINUX_IMMUTABLE = 0x9 CAP_MAC_ADMIN = 0x21 @@ -283,6 +343,7 @@ const ( CAP_NET_BIND_SERVICE = 0xa CAP_NET_BROADCAST = 0xb CAP_NET_RAW = 0xd + CAP_PERFMON = 0x26 CAP_SETFCAP = 0x1f CAP_SETGID = 0x6 CAP_SETPCAP = 0x8 @@ -372,8 +433,54 @@ const ( DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 + DEVMEM_MAGIC = 0x454d444d DEVPTS_SUPER_MAGIC = 0x1cd1 DMA_BUF_MAGIC = 0x444d4142 + DM_ACTIVE_PRESENT_FLAG = 0x20 + DM_BUFFER_FULL_FLAG = 0x100 + DM_CONTROL_NODE = "control" + DM_DATA_OUT_FLAG = 0x10000 + DM_DEFERRED_REMOVE = 0x20000 + DM_DEV_ARM_POLL = 0xc138fd10 + DM_DEV_CREATE = 0xc138fd03 + DM_DEV_REMOVE = 0xc138fd04 + DM_DEV_RENAME = 0xc138fd05 + DM_DEV_SET_GEOMETRY = 0xc138fd0f + DM_DEV_STATUS = 0xc138fd07 + DM_DEV_SUSPEND = 0xc138fd06 + DM_DEV_WAIT = 0xc138fd08 + DM_DIR = "mapper" + DM_GET_TARGET_VERSION = 0xc138fd11 + DM_INACTIVE_PRESENT_FLAG = 0x40 + DM_INTERNAL_SUSPEND_FLAG = 0x40000 + DM_IOCTL = 0xfd + DM_LIST_DEVICES = 0xc138fd02 + DM_LIST_VERSIONS = 0xc138fd0d + DM_MAX_TYPE_NAME = 0x10 + DM_NAME_LEN = 0x80 + DM_NOFLUSH_FLAG = 0x800 + DM_PERSISTENT_DEV_FLAG = 0x8 + DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000 + DM_READONLY_FLAG = 0x1 + DM_REMOVE_ALL = 0xc138fd01 + DM_SECURE_DATA_FLAG = 0x8000 
+ DM_SKIP_BDGET_FLAG = 0x200 + DM_SKIP_LOCKFS_FLAG = 0x400 + DM_STATUS_TABLE_FLAG = 0x10 + DM_SUSPEND_FLAG = 0x2 + DM_TABLE_CLEAR = 0xc138fd0a + DM_TABLE_DEPS = 0xc138fd0b + DM_TABLE_LOAD = 0xc138fd09 + DM_TABLE_STATUS = 0xc138fd0c + DM_TARGET_MSG = 0xc138fd0e + DM_UEVENT_GENERATED_FLAG = 0x2000 + DM_UUID_FLAG = 0x4000 + DM_UUID_LEN = 0x81 + DM_VERSION = 0xc138fd00 + DM_VERSION_EXTRA = "-ioctl (2020-02-27)" + DM_VERSION_MAJOR = 0x4 + DM_VERSION_MINOR = 0x2a + DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -475,6 +582,7 @@ const ( ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 + ETH_P_MRP = 0x88e3 ETH_P_MVRP = 0x88f5 ETH_P_NCSI = 0x88f8 ETH_P_NSH = 0x894f @@ -543,8 +651,8 @@ const ( FAN_DELETE = 0x200 FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 - FAN_DIR_MODIFY = 0x80000 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 @@ -572,13 +680,17 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 + FAN_REPORT_NAME = 0x800 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -602,8 +714,9 @@ const ( FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 - FSCRYPT_POLICY_FLAGS_VALID = 0xf + FSCRYPT_POLICY_FLAGS_VALID = 0x1f FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 FSCRYPT_POLICY_V1 = 0x0 FSCRYPT_POLICY_V2 = 0x2 @@ -632,7 +745,7 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0xf + FS_POLICY_FLAGS_VALID = 0x1f FS_VERITY_FL = 0x100000 FS_VERITY_HASH_ALG_SHA256 = 0x1 FS_VERITY_HASH_ALG_SHA512 = 0x2 @@ -834,6 +947,7 @@ const ( IPPROTO_EGP = 0x8 IPPROTO_ENCAP = 0x62 IPPROTO_ESP = 0x32 + IPPROTO_ETHERNET = 0x8f IPPROTO_FRAGMENT = 0x2c IPPROTO_GRE = 0x2f IPPROTO_HOPOPTS = 0x0 @@ -847,6 +961,7 @@ const ( IPPROTO_L2TP = 0x73 IPPROTO_MH = 0x87 IPPROTO_MPLS = 0x89 + IPPROTO_MPTCP = 0x106 IPPROTO_MTP = 0x5c IPPROTO_NONE = 0x3b IPPROTO_PIM = 0x67 @@ -1016,6 +1131,7 @@ const ( KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 KEYCTL_CAPS0_PUBLIC_KEY = 0x8 KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NOTIFICATIONS = 0x4 KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 @@ -1053,6 +1169,7 @@ const ( KEYCTL_SUPPORTS_VERIFY = 0x8 KEYCTL_UNLINK = 0x9 KEYCTL_UPDATE = 0x2 + KEYCTL_WATCH_KEY = 0x20 KEY_REQKEY_DEFL_DEFAULT = 0x0 KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 KEY_REQKEY_DEFL_NO_CHANGE = -0x1 @@ -1096,6 +1213,8 @@ const ( LOOP_SET_FD = 0x4c00 LOOP_SET_STATUS = 0x4c02 LOOP_SET_STATUS64 = 0x4c04 + LOOP_SET_STATUS_CLEARABLE_FLAGS = 0x4 + LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc LO_KEY_SIZE = 0x20 LO_NAME_SIZE = 0x40 MADV_COLD = 0x14 @@ -1393,6 +1512,92 @@ const ( PARITY_DEFAULT = 0x0 PARITY_NONE = 0x1 PARMRK = 0x8 + PERF_ATTR_SIZE_VER0 = 0x40 + PERF_ATTR_SIZE_VER1 = 0x48 + PERF_ATTR_SIZE_VER2 = 0x50 + PERF_ATTR_SIZE_VER3 = 0x60 + PERF_ATTR_SIZE_VER4 = 0x68 + PERF_ATTR_SIZE_VER5 = 0x70 + PERF_ATTR_SIZE_VER6 = 0x78 + PERF_AUX_FLAG_COLLISION = 0x8 + PERF_AUX_FLAG_OVERWRITE = 0x2 + PERF_AUX_FLAG_PARTIAL = 0x4 + PERF_AUX_FLAG_TRUNCATED = 0x1 
+ PERF_FLAG_FD_CLOEXEC = 0x8 + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 + PERF_MAX_CONTEXTS_PER_STACK = 0x8 + PERF_MAX_STACK_DEPTH = 0x7f + PERF_MEM_LOCK_LOCKED = 0x2 + PERF_MEM_LOCK_NA = 0x1 + PERF_MEM_LOCK_SHIFT = 0x18 + PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_L1 = 0x1 + PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L3 = 0x3 + PERF_MEM_LVLNUM_L4 = 0x4 + PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_NA = 0xf + PERF_MEM_LVLNUM_PMEM = 0xe + PERF_MEM_LVLNUM_RAM = 0xd + PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVL_HIT = 0x2 + PERF_MEM_LVL_IO = 0x1000 + PERF_MEM_LVL_L1 = 0x8 + PERF_MEM_LVL_L2 = 0x20 + PERF_MEM_LVL_L3 = 0x40 + PERF_MEM_LVL_LFB = 0x10 + PERF_MEM_LVL_LOC_RAM = 0x80 + PERF_MEM_LVL_MISS = 0x4 + PERF_MEM_LVL_NA = 0x1 + PERF_MEM_LVL_REM_CCE1 = 0x400 + PERF_MEM_LVL_REM_CCE2 = 0x800 + PERF_MEM_LVL_REM_RAM1 = 0x100 + PERF_MEM_LVL_REM_RAM2 = 0x200 + PERF_MEM_LVL_SHIFT = 0x5 + PERF_MEM_LVL_UNC = 0x2000 + PERF_MEM_OP_EXEC = 0x10 + PERF_MEM_OP_LOAD = 0x2 + PERF_MEM_OP_NA = 0x1 + PERF_MEM_OP_PFETCH = 0x8 + PERF_MEM_OP_SHIFT = 0x0 + PERF_MEM_OP_STORE = 0x4 + PERF_MEM_REMOTE_REMOTE = 0x1 + PERF_MEM_REMOTE_SHIFT = 0x25 + PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_SHIFT = 0x25 + PERF_MEM_SNOOP_HIT = 0x4 + PERF_MEM_SNOOP_HITM = 0x10 + PERF_MEM_SNOOP_MISS = 0x8 + PERF_MEM_SNOOP_NA = 0x1 + PERF_MEM_SNOOP_NONE = 0x2 + PERF_MEM_SNOOP_SHIFT = 0x13 + PERF_MEM_TLB_HIT = 0x2 + PERF_MEM_TLB_L1 = 0x8 + PERF_MEM_TLB_L2 = 0x10 + PERF_MEM_TLB_MISS = 0x4 + PERF_MEM_TLB_NA = 0x1 + PERF_MEM_TLB_OS = 0x40 + PERF_MEM_TLB_SHIFT = 0x1a + PERF_MEM_TLB_WK = 0x20 + PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER = 0x1 + PERF_RECORD_MISC_COMM_EXEC = 0x2000 + PERF_RECORD_MISC_CPUMODE_MASK = 0x7 + PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0x0 + PERF_RECORD_MISC_EXACT_IP = 0x4000 + PERF_RECORD_MISC_EXT_RESERVED = 0x8000 + PERF_RECORD_MISC_FORK_EXEC = 0x2000 + PERF_RECORD_MISC_GUEST_KERNEL = 0x4 + PERF_RECORD_MISC_GUEST_USER = 0x5 + PERF_RECORD_MISC_HYPERVISOR = 0x3 + PERF_RECORD_MISC_KERNEL = 0x1 + PERF_RECORD_MISC_MMAP_DATA = 0x2000 + PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 0x1000 + PERF_RECORD_MISC_SWITCH_OUT = 0x2000 + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT = 0x4000 + PERF_RECORD_MISC_USER = 0x2 + PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PIPEFS_MAGIC = 0x50495045 PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c @@ -1779,6 +1984,7 @@ const ( RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 RTPROT_ISIS = 0xbb + RTPROT_KEEPALIVED = 0x12 RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa @@ -1929,6 +2135,7 @@ const ( SOL_ATM = 0x108 SOL_CAIF = 0x116 SOL_CAN_BASE = 0x64 + SOL_CAN_RAW = 0x65 SOL_DCCP = 0x10d SOL_DECNET = 0x105 SOL_ICMPV6 = 0x3a @@ -1968,6 +2175,7 @@ const ( SO_EE_ORIGIN_TXSTATUS = 0x4 SO_EE_ORIGIN_TXTIME = 0x6 SO_EE_ORIGIN_ZEROCOPY = 0x5 + SO_EE_RFC4884_FLAG_INVALID = 0x1 SO_GET_FILTER = 0x1a SO_NO_CHECK = 0xb SO_PEERNAME = 0x1c @@ -1992,8 +2200,10 @@ const ( STATX_ATTR_APPEND = 0x20 STATX_ATTR_AUTOMOUNT = 0x1000 STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_DAX = 0x2000 STATX_ATTR_ENCRYPTED = 0x800 STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 STATX_BASIC_STATS = 0x7ff @@ -2002,6 +2212,7 @@ const ( STATX_CTIME = 0x80 STATX_GID = 0x10 STATX_INO = 0x100 + STATX_MNT_ID = 0x1000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -2238,6 +2449,23 @@ const ( WCONTINUED = 0x8 WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 + WDIOF_ALARMONLY = 0x400 + WDIOF_CARDRESET = 0x20 + WDIOF_EXTERN1 = 0x4 + 
WDIOF_EXTERN2 = 0x8 + WDIOF_FANFAULT = 0x2 + WDIOF_KEEPALIVEPING = 0x8000 + WDIOF_MAGICCLOSE = 0x100 + WDIOF_OVERHEAT = 0x1 + WDIOF_POWEROVER = 0x40 + WDIOF_POWERUNDER = 0x10 + WDIOF_PRETIMEOUT = 0x200 + WDIOF_SETTIMEOUT = 0x80 + WDIOF_UNKNOWN = -0x1 + WDIOS_DISABLECARD = 0x1 + WDIOS_ENABLECARD = 0x2 + WDIOS_TEMPPANIC = 0x4 + WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 WIN_ACKMEDIACHANGE = 0xdb WIN_CHECKPOWERMODE1 = 0xe5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8d207b041..dd282c08b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -78,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c4bf9cb80..82fc93c7b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -78,6 +80,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 0cab0522e..fe7094f27 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0xc F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 370d0a7f5..3b6cc5880 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -73,6 +73,8 @@ const ( EXTRA_MAGIC = 0x45585401 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 @@ -80,6 +82,7 @@ const ( FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 @@ -191,6 +194,7 @@ const ( PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f PPPIOCXFERUNIT = 0x744e + PROT_BTI = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fbf2f3174..ce3d9ae15 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 25e74b30a..7a85215ce 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4ecc0bca3..07d4cc1bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0xe F_GETLK64 = 0xe diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index dfb8f88a7..d4842ba1c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x21 F_GETLK64 = 0x21 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 72d8dad5b..941e20dac 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ca0e7b526..63d3bc566 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x5 F_GETLK64 = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 147511a97..490bee1ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 517349daf..467b8218e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -71,12 +71,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 F_GETLK = 0x5 F_GETLK64 = 0x5 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 094822465..79fbafbcf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -75,12 +75,15 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80086602 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 F_GETLK = 0x7 F_GETLK64 = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 96b9b8ab3..20f3a5799 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index ed522a84e..90b8fcd29 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index c8d36fe99..c5c03993b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -150,6 +150,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index f1c146a74..14dd3c1d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -158,6 +158,12 @@ const ( CLONE_SIGHAND = 0x800 CLONE_VFORK = 0x4000 CLONE_VM = 0x100 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 5402bd55c..c865a10df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index ffaf2d2f9..9db6b2fb6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -153,6 +153,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7aa796a64..7072526a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 1792d3f13..ac5efbe5a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -156,6 +156,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go new file mode 100644 index 000000000..a74639a46 --- /dev/null +++ 
b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -0,0 +1,1862 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + 
DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + 
ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + 
ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + 
IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + 
IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + 
IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR 
= 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0186941 + SIOCBRDGGFD = 0xc0186952 + SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0186946 + SIOCBRDGIFS = 0xc0606942 + 
SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80186940 + SIOCBRDGSFD = 0x80186952 + SIOCBRDGSHT = 0x80186951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80186953 + SIOCBRDGSPRI = 0x80186950 + SIOCBRDGSPROTO = 0x8018695a + SIOCBRDGSTO = 0x80186945 + SIOCBRDGSTXHC = 0x80186959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SIOCSWGDPID = 0xc018695b + SIOCSWGMAXFLOW = 0xc0186960 + SIOCSWGMAXGROUP = 0xc018695d + 
SIOCSWSDPID = 0x8018695c + SIOCSWSPORTNO = 0xc060695f + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + 
VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = 
syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign 
requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go 
b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
index 46e054ccb..5312c36cc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
@@ -192,6 +192,12 @@ const (
 CSTOPB = 0x40
 CSUSP = 0x1a
 CSWTCH = 0x1a
+ DIOC = 0x6400
+ DIOCGETB = 0x6402
+ DIOCGETC = 0x6401
+ DIOCGETP = 0x6408
+ DIOCSETE = 0x6403
+ DIOCSETP = 0x6409
 DLT_AIRONET_HEADER = 0x78
 DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
 DLT_ARCNET = 0x7
@@ -290,6 +296,7 @@ const (
 FF0 = 0x0
 FF1 = 0x8000
 FFDLY = 0x8000
+ FIORDCHK = 0x6603
 FLUSHALL = 0x1
 FLUSHDATA = 0x0
 FLUSHO = 0x2000
@@ -645,6 +652,14 @@ const (
 MAP_SHARED = 0x1
 MAP_TEXT = 0x400
 MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x2
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x29
+ MCAST_JOIN_SOURCE_GROUP = 0x2d
+ MCAST_LEAVE_GROUP = 0x2a
+ MCAST_LEAVE_SOURCE_GROUP = 0x2e
+ MCAST_UNBLOCK_SOURCE = 0x2c
 MCL_CURRENT = 0x1
 MCL_FUTURE = 0x2
 MSG_CTRUNC = 0x10
@@ -653,6 +668,7 @@ const (
 MSG_DUPCTRL = 0x800
 MSG_EOR = 0x8
 MSG_MAXIOVLEN = 0x10
+ MSG_NOSIGNAL = 0x200
 MSG_NOTIFICATION = 0x100
 MSG_OOB = 0x1
 MSG_PEEK = 0x2
@@ -687,6 +703,7 @@ const (
 O_APPEND = 0x8
 O_CLOEXEC = 0x800000
 O_CREAT = 0x100
+ O_DIRECTORY = 0x1000000
 O_DSYNC = 0x40
 O_EXCL = 0x400
 O_EXEC = 0x400000
@@ -725,7 +742,7 @@ const (
 RLIMIT_FSIZE = 0x1
 RLIMIT_NOFILE = 0x5
 RLIMIT_STACK = 0x3
- RLIM_INFINITY = -0x3
+ RLIM_INFINITY = 0xfffffffffffffffd
 RTAX_AUTHOR = 0x6
 RTAX_BRD = 0x7
 RTAX_DST = 0x0
@@ -1047,6 +1064,7 @@ const (
 TCOON = 0x1
 TCP_ABORT_THRESHOLD = 0x11
 TCP_ANONPRIVBIND = 0x20
+ TCP_CONGESTION = 0x25
 TCP_CONN_ABORT_THRESHOLD = 0x13
 TCP_CONN_NOTIFY_THRESHOLD = 0x12
 TCP_CORK = 0x18
@@ -1076,6 +1094,8 @@ const (
 TCSETSF = 0x5410
 TCSETSW = 0x540f
 TCXONC = 0x5406
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
 TIOC = 0x5400
 TIOCCBRK = 0x747a
 TIOCCDTR = 0x7478
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
deleted file mode 100644
index 23e94d366..000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
+++ /dev/null
@@ -1,1809 +0,0 @@
-// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
- -// +build darwin,386,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav 
int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - 
val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) 
- sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else 
{ - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 
uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, 
uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index e2ffb3bed..6eb457983 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist 
libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := 
syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2357,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2458,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 6836a4129..1c53979a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT 
·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,10 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -278,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go deleted file mode 100644 index 102561730..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ /dev/null @@ -1,1809 +0,0 @@ -// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,amd64,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return 
-} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) 
(err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 
uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - newoffset = int64(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, 
e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != 
nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) - ret = uintptr(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - 
} - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c67e336e2..889c14059 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) 
+ if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2357,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2458,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index a3fdf099d..c77bd6e20 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT 
·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,10 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -278,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go deleted file mode 100644 index d34e6df2f..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ /dev/null @@ -1,1782 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,arm,!go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav 
int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - 
val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) 
- sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else 
{ - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 
uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index b759757a7..d6b5249c2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index b67f518fa..5eec5f1d9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT 
·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b28861260..23b65a530 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -490,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), 0, 0, 0) r = int(r0) @@ -958,6 +943,56 @@ func libc_close_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Clonefile(src string, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefile_trampoline() + +//go:linkname libc_clonefile libc_clonefile +//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(src) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clonefileat_trampoline() + +//go:linkname libc_clonefileat libc_clonefileat +//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) @@ -1146,6 +1181,26 @@ func libc_fchownat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(dst) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fclonefileat_trampoline() + +//go:linkname libc_fclonefileat libc_fclonefileat +//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1207,6 +1262,28 @@ func libc_ftruncate_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) @@ -2443,6 +2520,21 @@ func libc_lstat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 40cce1bb2..53c402bf6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ 
-60,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 @@ -110,6 +108,10 @@ TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) +TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefile(SB) +TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clonefileat(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 @@ -132,6 +134,8 @@ TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) +TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fclonefileat(SB) TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 @@ -140,6 +144,8 @@ TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 @@ -164,6 +170,8 @@ TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 @@ -264,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 @@ -276,6 +282,8 @@ TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index fe1fdd78d..aebfe511a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, 
err = BytePtrFromString(path) @@ -439,6 +423,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 92efa1da3..d3af083f4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -13,17 +13,23 @@ import ( //go:cgo_import_dynamic libc_preadv preadv "libc.so" //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" +//go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev +//go:linkname procaccept4 libc_accept4 +//go:linkname procpipe2 libc_pipe2 var ( procreadv, procpreadv, procwritev, - procpwritev syscallFunc + procpwritev, + procaccept4, + procpipe2 syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -85,3 +91,24 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index df217825f..2fbbbe5a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -83,6 +83,22 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, 
uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1821,6 +1837,21 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -1847,6 +1878,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go similarity index 86% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go rename to vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8d39a09f7..ec6bd5bb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,7 +1,7 @@ -// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go +// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-// +build darwin,arm64,!go1.12 +// +build openbsd,mips64 package unix @@ -350,8 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -360,154 +360,15 @@ func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } 
- return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -516,19 +377,15 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -554,7 +411,7 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -563,8 +420,9 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), 
uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -704,18 +562,8 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -837,8 +685,8 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -847,8 +695,33 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -857,9 +730,11 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) - size = int(r0) +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -945,6 +820,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -966,8 +852,8 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 
:= RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -985,13 +871,23 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Kqueue() (fd int, err error) { r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) @@ -1068,6 +964,21 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1113,6 +1024,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1128,6 +1054,31 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1183,7 +1134,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1200,7 +1151,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = 
unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1342,7 +1293,7 @@ func Rmdir(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1364,7 +1315,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1428,8 +1379,8 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1438,8 +1389,8 @@ func Setprivexec(flag int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1448,8 +1399,18 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } @@ -1468,6 +1429,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1499,6 +1470,36 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err 
error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1555,7 +1556,7 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } @@ -1572,21 +1573,6 @@ func Umask(newmask int) (oldmask int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1650,7 +1636,7 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1692,89 +1678,13 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go new file mode 100644 index 000000000..aca34b349 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -0,0 +1,279 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. + +// +build mips64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + 
{"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + 
{"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 
24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go index f33614532..ad62324c7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go @@ -5,6 +5,7 @@ package unix +// Deprecated: Use libSystem wrappers instead of direct syscalls. 
const ( SYS_SYSCALL = 0 SYS_EXIT = 1 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index 654dd3da3..a2fc91d6a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -5,6 +5,7 @@ package unix +// Deprecated: Use libSystem wrappers instead of direct syscalls. const ( SYS_SYSCALL = 0 SYS_EXIT = 1 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go index 103a72ed1..20d7808ac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -5,6 +5,7 @@ package unix +// Deprecated: Use libSystem wrappers instead of direct syscalls. const ( SYS_SYSCALL = 0 SYS_EXIT = 1 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 7ab2130b9..527b9588c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -5,6 +5,7 @@ package unix +// Deprecated: Use libSystem wrappers instead of direct syscalls. const ( SYS_SYSCALL = 0 SYS_EXIT = 1 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 464c9a983..9912c6ee3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -6,129 +6,125 @@ package unix const ( - // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void exit(int rval); } - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } wait4 wait_args int - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int - SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } - SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, caddr_t from, int *fromlenaddr); } - SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } - SYS_ACCESS 
= 33 // { int access(char *path, int flags); } - SYS_CHFLAGS = 34 // { int chflags(char *path, int flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, int flags); } - SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } - SYS_VFORK = 66 // { pid_t vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(int from, int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } - SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } - SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } 
- SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } - SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } - SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } - SYS_GETDOMAINNAME = 162 // { int getdomainname(char *domainname, int len); } - SYS_SETDOMAINNAME = 163 // { int setdomainname(char *domainname, int len); } - SYS_UNAME = 164 // { int uname(struct utsname *name); } - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } - SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, size_t nbyte, int flags, off_t offset); } - SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, size_t nbyte, int flags, off_t offset); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - // SYS_NOSYS = 198; // { int nosys(void); } __syscall __syscall_args int + SYS_EXIT = 1 // { void exit(int rval); } + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } wait4 wait_args int + // SYS_NOSYS = 8; // { int nosys(void); } __nosys nosys_args int + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + 
SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_GETFSSTAT = 18 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, caddr_t msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, caddr_t from, int *fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, caddr_t name, int *anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, caddr_t asa, int *alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, caddr_t asa, int *alen); } + SYS_ACCESS = 33 // { int access(char *path, int flags); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, size_t namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { int readlink(char *path, char *buf, int count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { pid_t vfork(void); } + SYS_SBRK = 69 // { caddr_t sbrk(size_t incr); } + SYS_SSTK = 70 // { int sstk(size_t incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int 
getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(int from, int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_STATFS = 157 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 158 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_EXTPREAD = 173 // { ssize_t extpread(int fd, void *buf, size_t nbyte, int flags, off_t offset); } + SYS_EXTPWRITE = 174 // { ssize_t extpwrite(int fd, const void *buf, size_t nbyte, int flags, off_t offset); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct 
rlimit *rlp); } setrlimit __setrlimit_args int + SYS_MMAP = 197 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } SYS_LSEEK = 199 // { off_t lseek(int fd, int pad, off_t offset, int whence); } SYS_TRUNCATE = 200 // { int truncate(char *path, int pad, off_t length); } SYS_FTRUNCATE = 201 // { int ftruncate(int fd, int pad, off_t length); } @@ -161,8 +157,8 @@ const ( SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, struct iovec *iovp, u_int iovcnt, int flags, off_t offset); } - SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, struct iovec *iovp,u_int iovcnt, int flags, off_t offset); } + SYS_EXTPREADV = 289 // { ssize_t extpreadv(int fd, const struct iovec *iovp, int iovcnt, int flags, off_t offset); } + SYS_EXTPWRITEV = 290 // { ssize_t extpwritev(int fd, const struct iovec *iovp, int iovcnt, int flags, off_t offset); } SYS_FHSTATFS = 297 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } SYS_MODNEXT = 300 // { int modnext(int modid); } @@ -225,7 +221,7 @@ const ( SYS_KQUEUE = 362 // { int kqueue(void); } SYS_KEVENT = 363 // { int kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } - SYS_LCHFLAGS = 391 // { int lchflags(char *path, int flags); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_VARSYM_SET = 450 // { int varsym_set(int level, const char *name, const char *data); } @@ -302,7 +298,7 @@ const ( SYS_VMM_GUEST_CTL = 534 // { int vmm_guest_ctl(int op, struct vmm_guest_options *options); } SYS_VMM_GUEST_SYNC_ADDR = 535 // { int vmm_guest_sync_addr(long *dstaddr, long *srcaddr); } SYS_PROCCTL = 536 // { int procctl(idtype_t idtype, id_t id, int cmd, void *data); } - SYS_CHFLAGSAT = 537 // { int chflagsat(int fd, const char *path, int flags, int atflags);} + SYS_CHFLAGSAT = 537 // { int chflagsat(int fd, const char *path, u_long flags, int atflags);} SYS_PIPE2 = 538 // { int pipe2(int *fildes, int flags); } SYS_UTIMENSAT = 539 // { int utimensat(int fd, const char *path, const struct timespec *ts, int flags); } SYS_FUTIMENS = 540 // { int futimens(int fd, const struct timespec *ts); } @@ -312,4 +308,9 @@ const ( SYS_LWP_SETAFFINITY = 544 // { int lwp_setaffinity(pid_t pid, lwpid_t tid, const cpumask_t *mask); } SYS_LWP_GETAFFINITY = 545 // { int lwp_getaffinity(pid_t pid, lwpid_t tid, cpumask_t *mask); } SYS_LWP_CREATE2 = 546 // { int lwp_create2(struct lwp_params *params, const cpumask_t *mask); } + SYS_GETCPUCLOCKID = 547 // { int getcpuclockid(pid_t pid, lwpid_t lwp_id, clockid_t *clock_id); } + SYS_WAIT6 = 548 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_LWP_GETNAME = 549 // { int lwp_getname(lwpid_t tid, char *name, size_t len); } + SYS_GETRANDOM = 550 // { ssize_t getrandom(void *buf, size_t len, unsigned flags); } + SYS___REALPATH = 551 // { ssize_t __realpath(const char 
*path, char *buf, size_t len); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 54559a895..0f5a3f697 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -431,6 +431,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 054a741b7..36d5219ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -353,6 +353,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 307f2ba12..3622ba14b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index e9404dd54..6193c3dc0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -298,6 +298,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 68bb6d29b..640b97434 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -416,6 +416,8 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 + SYS_FACCESSAT2 = 4439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 4e5251185..3467fbb5f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -346,6 +346,8 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 4d9aa3003..0fc38d5a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -346,6 +346,8 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 64af0707d..999fd55bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -416,6 +416,8 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 
SYS_PIDFD_GETFD = 4438 + SYS_FACCESSAT2 = 4439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index cc3c067ba..1df0d7993 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4050ff983..4db39cca4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -395,6 +395,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 529abb6a7..e69274014 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -297,6 +297,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 276650010..a585aec4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -360,6 +360,8 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4dc82bb24..d047e567a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -374,6 +374,8 @@ const ( SYS_FSMOUNT = 432 SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go new file mode 100644 index 000000000..5c08d573b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -0,0 +1,220 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build mips64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... 
mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_MSYSCALL = 37 // { int sys_msyscall(void *addr, size_t len); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... 
void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS___REALPATH = 115 // { int sys___realpath(const char *pathname, char *resolved); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 
148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS___TMPFD = 164 // { int sys___tmpfd(int flags); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t 
*egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 9f47b87c5..830fbb35c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -92,9 +92,9 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte Reserved [8]uint32 } @@ -145,6 +145,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -190,6 +194,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -254,6 +267,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -301,7 +315,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -344,7 +357,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -365,7 +377,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -374,7 +385,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -396,7 +406,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -497,3 +508,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 966798a87..e53a7c49f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -70,7 +70,6 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - _ [4]byte Atim Timespec Mtim Timespec Ctim Timespec @@ -97,10 +96,11 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 - Reserved [8]uint32 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte + Flags_ext uint32 + Reserved [7]uint32 } type Flock_t struct { @@ -133,8 +133,7 @@ type Fbootstraptransfer_t struct { type Log2phys_t struct { Flags uint32 - _ [8]byte - _ [8]byte + _ [16]byte } type Fsid struct { @@ -151,6 +150,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -196,6 +199,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen 
uint32 type Linger struct { @@ -221,10 +233,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -262,6 +272,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -309,7 +320,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -352,7 +362,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -373,7 +382,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -382,7 +390,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -404,7 +411,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -427,7 +435,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -452,7 +459,6 @@ type Termios struct { Cflag uint64 Lflag uint64 Cc [20]uint8 - _ [4]byte Ispeed uint64 Ospeed uint64 } @@ -507,3 +513,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4fe4c9cd7..98be973ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -1,6 +1,5 @@ -// NOTE: cgo can't generate struct Stat_t and struct Statfs_t yet -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm,darwin @@ -31,7 +30,7 @@ type Timeval struct { Usec int32 } -type Timeval32 [0]byte +type Timeval32 struct{} type Rusage struct { Utime Timeval @@ -93,9 +92,9 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte Reserved [8]uint32 } @@ -146,6 +145,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -191,6 +194,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -255,6 +267,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -302,7 +315,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -345,7 +357,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -366,7 +377,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -375,7 +385,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -397,7 +406,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -498,3 +508,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 21999e4b0..ddae5afe1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -70,7 +70,6 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - _ [4]byte Atim Timespec Mtim Timespec Ctim Timespec @@ -97,10 +96,11 @@ type Statfs_t struct { Type uint32 Flags uint32 Fssubtype uint32 - Fstypename [16]int8 - Mntonname [1024]int8 - Mntfromname [1024]int8 - Reserved [8]uint32 + Fstypename [16]byte + Mntonname [1024]byte + Mntfromname [1024]byte + Flags_ext uint32 + Reserved [7]uint32 } type Flock_t struct { @@ -133,8 +133,7 @@ type Fbootstraptransfer_t struct { type Log2phys_t struct { Flags uint32 - _ [8]byte - _ [8]byte + _ [16]byte } type Fsid struct { @@ -151,6 +150,10 @@ type Dirent struct { _ [3]byte } +const ( + PathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -196,6 +199,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -221,10 +233,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -262,6 +272,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -309,7 +320,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -352,7 +362,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 
} @@ -373,7 +382,6 @@ type IfmaMsghdr2 struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Refcount int32 } @@ -382,7 +390,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -404,7 +411,8 @@ type RtMetrics struct { Rtt uint32 Rttvar uint32 Pksent uint32 - Filler [4]uint32 + State uint32 + Filler [3]uint32 } const ( @@ -427,7 +435,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -452,7 +459,6 @@ type Termios struct { Cflag uint64 Lflag uint64 Cc [20]uint8 - _ [4]byte Ispeed uint64 Ospeed uint64 } @@ -507,3 +513,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 71ea1d6d2..c4772df23 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -70,11 +70,11 @@ type Stat_t struct { Ctim Timespec Size int64 Blocks int64 - Blksize uint32 + _ uint32 Flags uint32 Gen uint32 Lspare int32 - Qspare1 int64 + Blksize int64 Qspare2 int64 } @@ -91,17 +91,15 @@ type Statfs_t struct { Owner uint32 Type int32 Flags int32 - _ [4]byte Syncwrites int64 Asyncwrites int64 - Fstypename [16]int8 - Mntonname [80]int8 + Fstypename [16]byte + Mntonname [80]byte Syncreads int64 Asyncreads int64 Spares1 int16 - Mntfromname [80]int8 + Mntfromname [80]byte Spares2 int16 - _ [4]byte Spare [2]int64 } @@ -202,10 +200,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -269,7 +265,7 @@ type FdSet struct { const ( SizeofIfMsghdr = 0xb0 SizeofIfData = 0xa0 - SizeofIfaMsghdr = 0x14 + SizeofIfaMsghdr = 0x18 SizeofIfmaMsghdr = 0x10 SizeofIfAnnounceMsghdr = 0x18 SizeofRtMsghdr = 0x98 @@ -280,10 +276,9 @@ type IfMsghdr struct { Msglen uint16 Version uint8 Type uint8 - Addrs int32 - Flags int32 Index uint16 - _ [2]byte + Flags int32 + Addrs int32 Data IfData } @@ -294,7 +289,6 @@ type IfData struct { Hdrlen uint8 Recvquota uint8 Xmitquota uint8 - _ [2]byte Mtu uint64 Metric uint64 Link_state uint64 @@ -316,24 +310,23 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Addrflags int32 + Metric int32 } type IfmaMsghdr struct { Msglen uint16 Version uint8 Type uint8 - Addrs int32 - Flags int32 Index uint16 - _ [2]byte + Flags int32 + Addrs int32 } type IfAnnounceMsghdr struct { @@ -350,7 +343,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -374,7 +366,6 @@ type RtMetrics struct { Hopcount uint64 Mssopt uint16 Pad uint16 - _ [4]byte Msl uint64 Iwmaxsegs uint64 Iwcapsegs uint64 @@ -400,7 +391,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 27d67ac8f..a96ad4c29 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -67,13 +67,30 @@ type Statx_t struct { Rdev_minor uint32 Dev_major uint32 Dev_minor uint32 - _ [14]uint64 + Mnt_id uint64 + _ uint64 + _ [12]uint64 } type Fsid 
struct { Val [2]int32 } +type FileCloneRange struct { + Src_fd int64 + Src_offset uint64 + Src_length uint64 + Dest_offset uint64 +} + +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Dest_count uint16 + Reserved1 uint16 + Reserved2 uint32 +} + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 @@ -138,6 +155,48 @@ type FscryptGetKeyStatusArg struct { _ [13]uint32 } +type DmIoctl struct { + Version [3]uint32 + Data_size uint32 + Data_start uint32 + Target_count uint32 + Open_count int32 + Flags uint32 + Event_nr uint32 + _ uint32 + Dev uint64 + Name [128]byte + Uuid [129]byte + Data [7]byte +} + +type DmTargetSpec struct { + Sector_start uint64 + Length uint64 + Status int32 + Next uint32 + Target_type [16]byte +} + +type DmTargetDeps struct { + Count uint32 + _ uint32 +} + +type DmTargetVersions struct { + Next uint32 + Version [3]uint32 +} + +type DmTargetMsg struct { + Sector uint64 +} + +const ( + SizeofDmIoctl = 0x138 + SizeofDmTargetSpec = 0x28 +) + type KeyctlDHParams struct { Private int32 Prime int32 @@ -266,6 +325,15 @@ type RawSockaddrL2TPIP6 struct { Conn_id uint32 } +type RawSockaddrIUCV struct { + Family uint16 + Port uint16 + Addr uint32 + Nodeid [8]int8 + User_id [8]int8 + Name [8]int8 +} + type _Socklen uint32 type Linger struct { @@ -378,6 +446,7 @@ const ( SizeofSockaddrTIPC = 0x10 SizeofSockaddrL2TPIP = 0x10 SizeofSockaddrL2TPIP6 = 0x20 + SizeofSockaddrIUCV = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -393,166 +462,107 @@ const ( ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - 
IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x36 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + 
SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -671,6 +681,8 @@ type InotifyEvent struct { const SizeofInotifyEvent = 0x10 +const SI_LOAD_SHIFT = 0x10 + type Utsname struct { Sysname [65]byte Nodename [65]byte @@ -696,6 +708,22 @@ const ( AT_EACCESS = 0x200 ) +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + type PollFd struct { Fd int32 Events int16 @@ -736,8 +764,6 @@ type SignalfdSiginfo struct { _ [28]uint8 } -const PERF_IOC_FLAG_GROUP = 0x1 - type Winsize struct { Row uint16 Col uint16 @@ -861,7 +887,10 @@ type PerfEventMmapPage struct { Time_offset uint64 Time_zero uint64 Size uint32 - _ [948]uint8 + _ uint32 + Time_cycles uint64 + Time_mask uint64 + _ [928]uint8 Data_head uint64 Data_tail uint64 Data_offset uint64 @@ -903,13 +932,13 @@ const ( ) const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + PERF_TYPE_MAX = 0x6 PERF_COUNT_HW_CPU_CYCLES = 0x0 PERF_COUNT_HW_INSTRUCTIONS = 0x1 PERF_COUNT_HW_CACHE_REFERENCES = 0x2 @@ -920,99 +949,163 @@ const ( PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - 
PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + PERF_COUNT_HW_MAX = 0xa + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + PERF_COUNT_HW_CACHE_MAX = 0x7 + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + PERF_COUNT_HW_CACHE_OP_MAX = 0x3 + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + PERF_COUNT_HW_CACHE_RESULT_MAX = 0x2 + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + PERF_COUNT_SW_MAX = 0xb + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + PERF_SAMPLE_REGS_USER = 0x1000 + PERF_SAMPLE_STACK_USER = 0x2000 + PERF_SAMPLE_WEIGHT = 0x4000 + PERF_SAMPLE_DATA_SRC = 0x8000 + PERF_SAMPLE_IDENTIFIER = 0x10000 + PERF_SAMPLE_TRANSACTION = 0x20000 + PERF_SAMPLE_REGS_INTR = 0x40000 + PERF_SAMPLE_PHYS_ADDR = 0x80000 + PERF_SAMPLE_AUX = 0x100000 + PERF_SAMPLE_CGROUP = 0x200000 + PERF_SAMPLE_MAX = 0x400000 + PERF_SAMPLE_BRANCH_USER_SHIFT = 0x0 + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 0x1 + PERF_SAMPLE_BRANCH_HV_SHIFT = 0x2 + PERF_SAMPLE_BRANCH_ANY_SHIFT = 0x3 + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 0x4 + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 0x5 + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 0x6 + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 0x7 + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 0x8 + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 0x9 + PERF_SAMPLE_BRANCH_COND_SHIFT = 0xa + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 0xb + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 0xc + PERF_SAMPLE_BRANCH_CALL_SHIFT = 0xd + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 0xe + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + 
PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 + PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 + PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_BR_UNKNOWN = 0x0 + PERF_BR_COND = 0x1 + PERF_BR_UNCOND = 0x2 + PERF_BR_IND = 0x3 + PERF_BR_CALL = 0x4 + PERF_BR_IND_CALL = 0x5 + PERF_BR_RET = 0x6 + PERF_BR_SYSCALL = 0x7 + PERF_BR_SYSRET = 0x8 + PERF_BR_COND_CALL = 0x9 + PERF_BR_COND_RET = 0xa + PERF_BR_MAX = 0xb + PERF_SAMPLE_REGS_ABI_NONE = 0x0 + PERF_SAMPLE_REGS_ABI_32 = 0x1 + PERF_SAMPLE_REGS_ABI_64 = 0x2 + PERF_TXN_ELISION = 0x1 + PERF_TXN_TRANSACTION = 0x2 + PERF_TXN_SYNC = 0x4 + PERF_TXN_ASYNC = 0x8 + PERF_TXN_RETRY = 0x10 + PERF_TXN_CONFLICT = 0x20 + PERF_TXN_CAPACITY_WRITE = 0x40 + PERF_TXN_CAPACITY_READ = 0x80 + PERF_TXN_MAX = 0x100 + PERF_TXN_ABORT_MASK = -0x100000000 + PERF_TXN_ABORT_SHIFT = 0x20 + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + PERF_FORMAT_MAX = 0x10 + PERF_IOC_FLAG_GROUP = 0x1 + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 + PERF_RECORD_KSYMBOL = 0x11 + PERF_RECORD_BPF_EVENT = 0x12 + PERF_RECORD_CGROUP = 0x13 + PERF_RECORD_TEXT_POKE = 0x14 + PERF_RECORD_MAX = 0x15 + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0x0 + PERF_RECORD_KSYMBOL_TYPE_BPF = 0x1 + PERF_RECORD_KSYMBOL_TYPE_OOL = 0x2 + PERF_RECORD_KSYMBOL_TYPE_MAX = 0x3 + PERF_BPF_EVENT_UNKNOWN = 0x0 + PERF_BPF_EVENT_PROG_LOAD = 0x1 + PERF_BPF_EVENT_PROG_UNLOAD = 0x2 + PERF_BPF_EVENT_MAX = 0x3 + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + PERF_CONTEXT_MAX = -0xfff ) type TCPMD5Sig struct { @@ -1231,6 +1324,394 @@ const ( SizeofTpacketStatsV3 = 0xc ) +const ( + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_PROP_LIST 
= 0x34 + IFLA_ALT_IFNAME = 0x35 + IFLA_PERM_ADDRESS = 0x36 + IFLA_INET_UNSPEC = 0x0 + IFLA_INET_CONF = 0x1 + IFLA_INET6_UNSPEC = 0x0 + IFLA_INET6_FLAGS = 0x1 + IFLA_INET6_CONF = 0x2 + IFLA_INET6_STATS = 0x3 + IFLA_INET6_MCAST = 0x4 + IFLA_INET6_CACHEINFO = 0x5 + IFLA_INET6_ICMP6STATS = 0x6 + IFLA_INET6_TOKEN = 0x7 + IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_BR_UNSPEC = 0x0 + IFLA_BR_FORWARD_DELAY = 0x1 + IFLA_BR_HELLO_TIME = 0x2 + IFLA_BR_MAX_AGE = 0x3 + IFLA_BR_AGEING_TIME = 0x4 + IFLA_BR_STP_STATE = 0x5 + IFLA_BR_PRIORITY = 0x6 + IFLA_BR_VLAN_FILTERING = 0x7 + IFLA_BR_VLAN_PROTOCOL = 0x8 + IFLA_BR_GROUP_FWD_MASK = 0x9 + IFLA_BR_ROOT_ID = 0xa + IFLA_BR_BRIDGE_ID = 0xb + IFLA_BR_ROOT_PORT = 0xc + IFLA_BR_ROOT_PATH_COST = 0xd + IFLA_BR_TOPOLOGY_CHANGE = 0xe + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 0xf + IFLA_BR_HELLO_TIMER = 0x10 + IFLA_BR_TCN_TIMER = 0x11 + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 0x12 + IFLA_BR_GC_TIMER = 0x13 + IFLA_BR_GROUP_ADDR = 0x14 + IFLA_BR_FDB_FLUSH = 0x15 + IFLA_BR_MCAST_ROUTER = 0x16 + IFLA_BR_MCAST_SNOOPING = 0x17 + IFLA_BR_MCAST_QUERY_USE_IFADDR = 0x18 + IFLA_BR_MCAST_QUERIER = 0x19 + IFLA_BR_MCAST_HASH_ELASTICITY = 0x1a + IFLA_BR_MCAST_HASH_MAX = 0x1b + IFLA_BR_MCAST_LAST_MEMBER_CNT = 0x1c + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 0x1d + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 0x1e + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 0x1f + IFLA_BR_MCAST_QUERIER_INTVL = 0x20 + IFLA_BR_MCAST_QUERY_INTVL = 0x21 + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 0x22 + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 0x23 + IFLA_BR_NF_CALL_IPTABLES = 0x24 + IFLA_BR_NF_CALL_IP6TABLES = 0x25 + IFLA_BR_NF_CALL_ARPTABLES = 0x26 + IFLA_BR_VLAN_DEFAULT_PVID = 0x27 + IFLA_BR_PAD = 0x28 + IFLA_BR_VLAN_STATS_ENABLED = 0x29 + IFLA_BR_MCAST_STATS_ENABLED = 0x2a + IFLA_BR_MCAST_IGMP_VERSION = 0x2b + IFLA_BR_MCAST_MLD_VERSION = 0x2c + IFLA_BR_VLAN_STATS_PER_PORT = 0x2d + IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BRPORT_UNSPEC = 0x0 + IFLA_BRPORT_STATE = 0x1 + IFLA_BRPORT_PRIORITY = 0x2 + IFLA_BRPORT_COST = 0x3 + IFLA_BRPORT_MODE = 0x4 + IFLA_BRPORT_GUARD = 0x5 + IFLA_BRPORT_PROTECT = 0x6 + IFLA_BRPORT_FAST_LEAVE = 0x7 + IFLA_BRPORT_LEARNING = 0x8 + IFLA_BRPORT_UNICAST_FLOOD = 0x9 + IFLA_BRPORT_PROXYARP = 0xa + IFLA_BRPORT_LEARNING_SYNC = 0xb + IFLA_BRPORT_PROXYARP_WIFI = 0xc + IFLA_BRPORT_ROOT_ID = 0xd + IFLA_BRPORT_BRIDGE_ID = 0xe + IFLA_BRPORT_DESIGNATED_PORT = 0xf + IFLA_BRPORT_DESIGNATED_COST = 0x10 + IFLA_BRPORT_ID = 0x11 + IFLA_BRPORT_NO = 0x12 + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 0x13 + IFLA_BRPORT_CONFIG_PENDING = 0x14 + IFLA_BRPORT_MESSAGE_AGE_TIMER = 0x15 + IFLA_BRPORT_FORWARD_DELAY_TIMER = 0x16 + IFLA_BRPORT_HOLD_TIMER = 0x17 + IFLA_BRPORT_FLUSH = 0x18 + IFLA_BRPORT_MULTICAST_ROUTER = 0x19 + IFLA_BRPORT_PAD = 0x1a + IFLA_BRPORT_MCAST_FLOOD = 0x1b + IFLA_BRPORT_MCAST_TO_UCAST = 0x1c + IFLA_BRPORT_VLAN_TUNNEL = 0x1d + IFLA_BRPORT_BCAST_FLOOD = 0x1e + IFLA_BRPORT_GROUP_FWD_MASK = 0x1f + IFLA_BRPORT_NEIGH_SUPPRESS = 0x20 + IFLA_BRPORT_ISOLATED = 0x21 + IFLA_BRPORT_BACKUP_PORT = 0x22 + IFLA_BRPORT_MRP_RING_OPEN = 0x23 + IFLA_INFO_UNSPEC = 0x0 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + IFLA_VLAN_UNSPEC = 0x0 + IFLA_VLAN_ID = 0x1 + IFLA_VLAN_FLAGS = 0x2 + IFLA_VLAN_EGRESS_QOS = 0x3 + IFLA_VLAN_INGRESS_QOS = 0x4 + IFLA_VLAN_PROTOCOL = 0x5 + IFLA_VLAN_QOS_UNSPEC = 0x0 + IFLA_VLAN_QOS_MAPPING = 0x1 + IFLA_MACVLAN_UNSPEC = 0x0 + IFLA_MACVLAN_MODE = 0x1 + IFLA_MACVLAN_FLAGS = 0x2 + IFLA_MACVLAN_MACADDR_MODE = 0x3 + IFLA_MACVLAN_MACADDR = 0x4 + 
IFLA_MACVLAN_MACADDR_DATA = 0x5 + IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_VRF_UNSPEC = 0x0 + IFLA_VRF_TABLE = 0x1 + IFLA_VRF_PORT_UNSPEC = 0x0 + IFLA_VRF_PORT_TABLE = 0x1 + IFLA_MACSEC_UNSPEC = 0x0 + IFLA_MACSEC_SCI = 0x1 + IFLA_MACSEC_PORT = 0x2 + IFLA_MACSEC_ICV_LEN = 0x3 + IFLA_MACSEC_CIPHER_SUITE = 0x4 + IFLA_MACSEC_WINDOW = 0x5 + IFLA_MACSEC_ENCODING_SA = 0x6 + IFLA_MACSEC_ENCRYPT = 0x7 + IFLA_MACSEC_PROTECT = 0x8 + IFLA_MACSEC_INC_SCI = 0x9 + IFLA_MACSEC_ES = 0xa + IFLA_MACSEC_SCB = 0xb + IFLA_MACSEC_REPLAY_PROTECT = 0xc + IFLA_MACSEC_VALIDATION = 0xd + IFLA_MACSEC_PAD = 0xe + IFLA_MACSEC_OFFLOAD = 0xf + IFLA_XFRM_UNSPEC = 0x0 + IFLA_XFRM_LINK = 0x1 + IFLA_XFRM_IF_ID = 0x2 + IFLA_IPVLAN_UNSPEC = 0x0 + IFLA_IPVLAN_MODE = 0x1 + IFLA_IPVLAN_FLAGS = 0x2 + IFLA_VXLAN_UNSPEC = 0x0 + IFLA_VXLAN_ID = 0x1 + IFLA_VXLAN_GROUP = 0x2 + IFLA_VXLAN_LINK = 0x3 + IFLA_VXLAN_LOCAL = 0x4 + IFLA_VXLAN_TTL = 0x5 + IFLA_VXLAN_TOS = 0x6 + IFLA_VXLAN_LEARNING = 0x7 + IFLA_VXLAN_AGEING = 0x8 + IFLA_VXLAN_LIMIT = 0x9 + IFLA_VXLAN_PORT_RANGE = 0xa + IFLA_VXLAN_PROXY = 0xb + IFLA_VXLAN_RSC = 0xc + IFLA_VXLAN_L2MISS = 0xd + IFLA_VXLAN_L3MISS = 0xe + IFLA_VXLAN_PORT = 0xf + IFLA_VXLAN_GROUP6 = 0x10 + IFLA_VXLAN_LOCAL6 = 0x11 + IFLA_VXLAN_UDP_CSUM = 0x12 + IFLA_VXLAN_UDP_ZERO_CSUM6_TX = 0x13 + IFLA_VXLAN_UDP_ZERO_CSUM6_RX = 0x14 + IFLA_VXLAN_REMCSUM_TX = 0x15 + IFLA_VXLAN_REMCSUM_RX = 0x16 + IFLA_VXLAN_GBP = 0x17 + IFLA_VXLAN_REMCSUM_NOPARTIAL = 0x18 + IFLA_VXLAN_COLLECT_METADATA = 0x19 + IFLA_VXLAN_LABEL = 0x1a + IFLA_VXLAN_GPE = 0x1b + IFLA_VXLAN_TTL_INHERIT = 0x1c + IFLA_VXLAN_DF = 0x1d + IFLA_GENEVE_UNSPEC = 0x0 + IFLA_GENEVE_ID = 0x1 + IFLA_GENEVE_REMOTE = 0x2 + IFLA_GENEVE_TTL = 0x3 + IFLA_GENEVE_TOS = 0x4 + IFLA_GENEVE_PORT = 0x5 + IFLA_GENEVE_COLLECT_METADATA = 0x6 + IFLA_GENEVE_REMOTE6 = 0x7 + IFLA_GENEVE_UDP_CSUM = 0x8 + IFLA_GENEVE_UDP_ZERO_CSUM6_TX = 0x9 + IFLA_GENEVE_UDP_ZERO_CSUM6_RX = 0xa + IFLA_GENEVE_LABEL = 0xb + IFLA_GENEVE_TTL_INHERIT = 0xc + IFLA_GENEVE_DF = 0xd + IFLA_BAREUDP_UNSPEC = 0x0 + IFLA_BAREUDP_PORT = 0x1 + IFLA_BAREUDP_ETHERTYPE = 0x2 + IFLA_BAREUDP_SRCPORT_MIN = 0x3 + IFLA_BAREUDP_MULTIPROTO_MODE = 0x4 + IFLA_PPP_UNSPEC = 0x0 + IFLA_PPP_DEV_FD = 0x1 + IFLA_GTP_UNSPEC = 0x0 + IFLA_GTP_FD0 = 0x1 + IFLA_GTP_FD1 = 0x2 + IFLA_GTP_PDP_HASHSIZE = 0x3 + IFLA_GTP_ROLE = 0x4 + IFLA_BOND_UNSPEC = 0x0 + IFLA_BOND_MODE = 0x1 + IFLA_BOND_ACTIVE_SLAVE = 0x2 + IFLA_BOND_MIIMON = 0x3 + IFLA_BOND_UPDELAY = 0x4 + IFLA_BOND_DOWNDELAY = 0x5 + IFLA_BOND_USE_CARRIER = 0x6 + IFLA_BOND_ARP_INTERVAL = 0x7 + IFLA_BOND_ARP_IP_TARGET = 0x8 + IFLA_BOND_ARP_VALIDATE = 0x9 + IFLA_BOND_ARP_ALL_TARGETS = 0xa + IFLA_BOND_PRIMARY = 0xb + IFLA_BOND_PRIMARY_RESELECT = 0xc + IFLA_BOND_FAIL_OVER_MAC = 0xd + IFLA_BOND_XMIT_HASH_POLICY = 0xe + IFLA_BOND_RESEND_IGMP = 0xf + IFLA_BOND_NUM_PEER_NOTIF = 0x10 + IFLA_BOND_ALL_SLAVES_ACTIVE = 0x11 + IFLA_BOND_MIN_LINKS = 0x12 + IFLA_BOND_LP_INTERVAL = 0x13 + IFLA_BOND_PACKETS_PER_SLAVE = 0x14 + IFLA_BOND_AD_LACP_RATE = 0x15 + IFLA_BOND_AD_SELECT = 0x16 + IFLA_BOND_AD_INFO = 0x17 + IFLA_BOND_AD_ACTOR_SYS_PRIO = 0x18 + IFLA_BOND_AD_USER_PORT_KEY = 0x19 + IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a + IFLA_BOND_TLB_DYNAMIC_LB = 0x1b + IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_INFO_UNSPEC = 0x0 + IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 + IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 + IFLA_BOND_AD_INFO_ACTOR_KEY = 0x3 + IFLA_BOND_AD_INFO_PARTNER_KEY = 0x4 + IFLA_BOND_AD_INFO_PARTNER_MAC = 0x5 + IFLA_BOND_SLAVE_UNSPEC = 0x0 + IFLA_BOND_SLAVE_STATE = 0x1 + IFLA_BOND_SLAVE_MII_STATUS = 0x2 + 
IFLA_BOND_SLAVE_LINK_FAILURE_COUNT = 0x3 + IFLA_BOND_SLAVE_PERM_HWADDR = 0x4 + IFLA_BOND_SLAVE_QUEUE_ID = 0x5 + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_VF_INFO_UNSPEC = 0x0 + IFLA_VF_INFO = 0x1 + IFLA_VF_UNSPEC = 0x0 + IFLA_VF_MAC = 0x1 + IFLA_VF_VLAN = 0x2 + IFLA_VF_TX_RATE = 0x3 + IFLA_VF_SPOOFCHK = 0x4 + IFLA_VF_LINK_STATE = 0x5 + IFLA_VF_RATE = 0x6 + IFLA_VF_RSS_QUERY_EN = 0x7 + IFLA_VF_STATS = 0x8 + IFLA_VF_TRUST = 0x9 + IFLA_VF_IB_NODE_GUID = 0xa + IFLA_VF_IB_PORT_GUID = 0xb + IFLA_VF_VLAN_LIST = 0xc + IFLA_VF_BROADCAST = 0xd + IFLA_VF_VLAN_INFO_UNSPEC = 0x0 + IFLA_VF_VLAN_INFO = 0x1 + IFLA_VF_LINK_STATE_AUTO = 0x0 + IFLA_VF_LINK_STATE_ENABLE = 0x1 + IFLA_VF_LINK_STATE_DISABLE = 0x2 + IFLA_VF_STATS_RX_PACKETS = 0x0 + IFLA_VF_STATS_TX_PACKETS = 0x1 + IFLA_VF_STATS_RX_BYTES = 0x2 + IFLA_VF_STATS_TX_BYTES = 0x3 + IFLA_VF_STATS_BROADCAST = 0x4 + IFLA_VF_STATS_MULTICAST = 0x5 + IFLA_VF_STATS_PAD = 0x6 + IFLA_VF_STATS_RX_DROPPED = 0x7 + IFLA_VF_STATS_TX_DROPPED = 0x8 + IFLA_VF_PORT_UNSPEC = 0x0 + IFLA_VF_PORT = 0x1 + IFLA_PORT_UNSPEC = 0x0 + IFLA_PORT_VF = 0x1 + IFLA_PORT_PROFILE = 0x2 + IFLA_PORT_VSI_TYPE = 0x3 + IFLA_PORT_INSTANCE_UUID = 0x4 + IFLA_PORT_HOST_UUID = 0x5 + IFLA_PORT_REQUEST = 0x6 + IFLA_PORT_RESPONSE = 0x7 + IFLA_IPOIB_UNSPEC = 0x0 + IFLA_IPOIB_PKEY = 0x1 + IFLA_IPOIB_MODE = 0x2 + IFLA_IPOIB_UMCAST = 0x3 + IFLA_HSR_UNSPEC = 0x0 + IFLA_HSR_SLAVE1 = 0x1 + IFLA_HSR_SLAVE2 = 0x2 + IFLA_HSR_MULTICAST_SPEC = 0x3 + IFLA_HSR_SUPERVISION_ADDR = 0x4 + IFLA_HSR_SEQ_NR = 0x5 + IFLA_HSR_VERSION = 0x6 + IFLA_STATS_UNSPEC = 0x0 + IFLA_STATS_LINK_64 = 0x1 + IFLA_STATS_LINK_XSTATS = 0x2 + IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 + IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 + IFLA_STATS_AF_SPEC = 0x5 + IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_XDP_UNSPEC = 0x0 + IFLA_XDP_FD = 0x1 + IFLA_XDP_ATTACHED = 0x2 + IFLA_XDP_FLAGS = 0x3 + IFLA_XDP_PROG_ID = 0x4 + IFLA_XDP_DRV_PROG_ID = 0x5 + IFLA_XDP_SKB_PROG_ID = 0x6 + IFLA_XDP_HW_PROG_ID = 0x7 + IFLA_XDP_EXPECTED_FD = 0x8 + IFLA_EVENT_NONE = 0x0 + IFLA_EVENT_REBOOT = 0x1 + IFLA_EVENT_FEATURES = 0x2 + IFLA_EVENT_BONDING_FAILOVER = 0x3 + IFLA_EVENT_NOTIFY_PEERS = 0x4 + IFLA_EVENT_IGMP_RESEND = 0x5 + IFLA_EVENT_BONDING_OPTIONS = 0x6 + IFLA_TUN_UNSPEC = 0x0 + IFLA_TUN_OWNER = 0x1 + IFLA_TUN_GROUP = 0x2 + IFLA_TUN_TYPE = 0x3 + IFLA_TUN_PI = 0x4 + IFLA_TUN_VNET_HDR = 0x5 + IFLA_TUN_PERSIST = 0x6 + IFLA_TUN_MULTI_QUEUE = 0x7 + IFLA_TUN_NUM_QUEUES = 0x8 + IFLA_TUN_NUM_DISABLED_QUEUES = 0x9 + IFLA_RMNET_UNSPEC = 0x0 + IFLA_RMNET_MUX_ID = 0x1 + IFLA_RMNET_FLAGS = 0x2 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -1318,7 +1799,7 @@ const ( NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 NFTA_HOOK_HOOKNUM = 0x1 @@ -1689,6 +2170,21 @@ const ( NFT_NG_RANDOM = 0x1 ) +const ( + NFTA_TARGET_UNSPEC = 0x0 + NFTA_TARGET_NAME = 0x1 + NFTA_TARGET_REV = 0x2 + NFTA_TARGET_INFO = 0x3 + NFTA_MATCH_UNSPEC = 0x0 + NFTA_MATCH_NAME = 0x1 + NFTA_MATCH_REV = 0x2 + NFTA_MATCH_INFO = 0x3 + NFTA_COMPAT_UNSPEC = 0x0 + NFTA_COMPAT_NAME = 0x1 + NFTA_COMPAT_REV = 0x2 + NFTA_COMPAT_TYPE = 0x3 +) + type RTCTime struct { Sec int32 Min int32 @@ -1742,9 +2238,12 @@ type XDPMmapOffsets struct { } type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 + Rx_dropped uint64 + Rx_invalid_descs 
uint64 + Tx_invalid_descs uint64 + Rx_ring_full uint64 + Rx_fill_ring_empty_descs uint64 + Tx_ring_empty_descs uint64 } type XDPDesc struct { @@ -1912,6 +2411,10 @@ const ( BPF_MAP_DELETE_BATCH = 0x1b BPF_LINK_CREATE = 0x1c BPF_LINK_UPDATE = 0x1d + BPF_LINK_GET_FD_BY_ID = 0x1e + BPF_LINK_GET_NEXT_ID = 0x1f + BPF_ENABLE_STATS = 0x20 + BPF_ITER_CREATE = 0x21 BPF_MAP_TYPE_UNSPEC = 0x0 BPF_MAP_TYPE_HASH = 0x1 BPF_MAP_TYPE_ARRAY = 0x2 @@ -1939,6 +2442,7 @@ const ( BPF_MAP_TYPE_SK_STORAGE = 0x18 BPF_MAP_TYPE_DEVMAP_HASH = 0x19 BPF_MAP_TYPE_STRUCT_OPS = 0x1a + BPF_MAP_TYPE_RINGBUF = 0x1b BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -1997,6 +2501,18 @@ const ( BPF_TRACE_FEXIT = 0x19 BPF_MODIFY_RETURN = 0x1a BPF_LSM_MAC = 0x1b + BPF_TRACE_ITER = 0x1c + BPF_CGROUP_INET4_GETPEERNAME = 0x1d + BPF_CGROUP_INET6_GETPEERNAME = 0x1e + BPF_CGROUP_INET4_GETSOCKNAME = 0x1f + BPF_CGROUP_INET6_GETSOCKNAME = 0x20 + BPF_XDP_DEVMAP = 0x21 + BPF_LINK_TYPE_UNSPEC = 0x0 + BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 + BPF_LINK_TYPE_TRACING = 0x2 + BPF_LINK_TYPE_CGROUP = 0x3 + BPF_LINK_TYPE_ITER = 0x4 + BPF_LINK_TYPE_NETNS = 0x5 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2012,6 +2528,7 @@ const ( BPF_F_WRONLY_PROG = 0x100 BPF_F_CLONE = 0x200 BPF_F_MMAPABLE = 0x400 + BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 BPF_STACK_BUILD_ID_IP = 0x2 @@ -2035,16 +2552,30 @@ const ( BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 BPF_F_CURRENT_NETNS = -0x1 + BPF_CSUM_LEVEL_QUERY = 0x0 + BPF_CSUM_LEVEL_INC = 0x1 + BPF_CSUM_LEVEL_DEC = 0x2 + BPF_CSUM_LEVEL_RESET = 0x3 BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1 + BPF_RB_NO_WAKEUP = 0x1 + BPF_RB_FORCE_WAKEUP = 0x2 + BPF_RB_AVAIL_DATA = 0x0 + BPF_RB_RING_SIZE = 0x1 + BPF_RB_CONS_POS = 0x2 + BPF_RB_PROD_POS = 0x3 + BPF_RINGBUF_BUSY_BIT = 0x80000000 + BPF_RINGBUF_DISCARD_BIT = 0x40000000 + BPF_RINGBUF_HDR_SZ = 0x8 BPF_ADJ_ROOM_NET = 0x0 BPF_ADJ_ROOM_MAC = 0x1 BPF_HDR_START_MAC = 0x0 @@ -2359,7 +2890,7 @@ const ( DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c DEVLINK_ATTR_PAD = 0x3d DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e - DEVLINK_ATTR_MAX = 0x90 + DEVLINK_ATTR_MAX = 0x94 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -2417,3 +2948,36 @@ const ( NHA_GROUPS = 0x9 NHA_MASTER = 0xa ) + +const ( + CAN_RAW_FILTER = 0x1 + CAN_RAW_ERR_FILTER = 0x2 + CAN_RAW_LOOPBACK = 0x3 + CAN_RAW_RECV_OWN_MSGS = 0x4 + CAN_RAW_FD_FRAMES = 0x5 + CAN_RAW_JOIN_FILTERS = 0x6 +) + +type WatchdogInfo struct { + Options uint32 + Version uint32 + Identity [32]uint8 +} + +type PPSFData struct { + Info PPSKInfo + Timeout PPSKTime +} + +type PPSKParams struct { + Api_version int32 + Mode int32 + Assert_off_tu PPSKTime + Clear_off_tu PPSKTime +} + +type PPSKTime struct { + Sec int64 + Nsec int32 + Flags uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 761b67c86..d54618aa6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -117,6 +117,11 @@ type Flock_t struct { 
Pid int32 } +type DmNameList struct { + Dev uint64 + Next uint32 +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -597,3 +602,18 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 +} + +const ( + PPS_GETPARAMS = 0x800470a1 + PPS_SETPARAMS = 0x400470a2 + PPS_GETCAP = 0x800470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 201fb3482..741d25be9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -117,6 +117,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -612,3 +619,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 8051b5610..e8d982c3d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -121,6 +121,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -589,3 +596,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800470a1 + PPS_SETPARAMS = 0x400470a2 + PPS_GETCAP = 0x800470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index a936f2169..311cf2155 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -591,3 +598,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index aaca03dd7..1312bdf77 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -120,6 +120,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -595,3 +602,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + 
+const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2e7f3b8ca..2a9934819 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -594,3 +601,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 16add5a25..f964307b2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -594,3 +601,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4ed2c8e54..ca0fab270 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -120,6 +120,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -595,3 +602,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 741519099..257e00424 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -119,6 +119,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -601,3 +608,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 046c2debd..980dd3173 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -119,6 +119,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -601,3 +608,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 0f2f61a6a..d9fdab20b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -118,6 +118,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -619,3 +626,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cca1b6be2..c25de8c67 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -117,6 +117,13 @@ type Flock_t struct { _ [4]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x6 FADV_NOREUSE = 0x7 @@ -615,3 +622,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 33a73bf18..97fca6534 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -121,6 +121,13 @@ type Flock_t struct { _ [2]byte } +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + const ( FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 @@ -596,3 +603,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go new file mode 100644 index 000000000..992a1f8c0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -0,0 +1,565 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build mips64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + 
Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_FOLLOW = 0x4 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + 
Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 23ed9fe51..db817f3ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -88,7 +88,6 @@ type Stat_t struct { Mtim Timespec Ctim Timespec Blksize int32 - _ [4]byte Blocks int64 Fstype [16]int8 } @@ -96,7 +95,6 @@ type Stat_t struct { type Flock_t struct { Type int16 Whence int16 - _ [4]byte Start int64 Len int64 Sysid int32 @@ -138,12 +136,12 @@ type RawSockaddrInet4 struct { } type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 - X__sin6_src_id uint32 + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + _ uint32 } type RawSockaddrUnix struct { @@ -196,10 +194,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Accrights *int8 Accrightslen int32 _ [4]byte @@ -228,7 +224,7 @@ type IPv6MTUInfo struct { } type ICMPv6Filter struct { - X__icmp6_filt [8]uint32 + Filt [8]uint32 } const ( @@ -291,7 +287,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -299,7 +294,6 @@ type IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 - _ [1]byte Mtu uint32 Metric uint32 Baudrate uint32 @@ -324,7 +318,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -333,7 +326,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -371,15 +363,14 @@ type BpfVersion struct { } type BpfStat struct { - Recv uint64 - Drop uint64 - Capt uint64 - Padding [13]uint64 + Recv uint64 + Drop uint64 + Capt uint64 + _ [13]uint64 } type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index e409d76f0..1adb60739 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -16,13 +16,19 @@ const ( MEM_RESET_UNDO = 0x01000000 MEM_LARGE_PAGES = 0x20000000 - PAGE_NOACCESS = 0x01 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 + 
PAGE_NOACCESS = 0x00000001 + PAGE_READONLY = 0x00000002 + PAGE_READWRITE = 0x00000004 + PAGE_WRITECOPY = 0x00000008 + PAGE_EXECUTE = 0x00000010 + PAGE_EXECUTE_READ = 0x00000020 + PAGE_EXECUTE_READWRITE = 0x00000040 + PAGE_EXECUTE_WRITECOPY = 0x00000080 + PAGE_GUARD = 0x00000100 + PAGE_NOCACHE = 0x00000200 + PAGE_WRITECOMBINE = 0x00000400 + PAGE_TARGETS_INVALID = 0x40000000 + PAGE_TARGETS_NO_UPDATE = 0x40000000 QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 847e00bc9..f54ff90aa 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -65,6 +65,7 @@ const ( SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 SERVICE_ACCEPT_POWEREVENT = 64 SERVICE_ACCEPT_SESSIONCHANGE = 128 + SERVICE_ACCEPT_PRESHUTDOWN = 256 SERVICE_CONTROL_STOP = 1 SERVICE_CONTROL_PAUSE = 2 @@ -80,6 +81,7 @@ const ( SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 SERVICE_CONTROL_POWEREVENT = 13 SERVICE_CONTROL_SESSIONCHANGE = 14 + SERVICE_CONTROL_PRESHUTDOWN = 15 SERVICE_ACTIVE = 1 SERVICE_INACTIVE = 2 diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go new file mode 100644 index 000000000..1681810e0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go @@ -0,0 +1,100 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 
0x211 + ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 + 
ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index af828a91b..6122f557a 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -25,17 +25,20 @@ package windows // import "golang.org/x/sys/windows" import ( + "bytes" + "strings" "syscall" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL } a := make([]byte, len(s)+1) copy(a, s) @@ -53,6 +56,41 @@ func BytePtrFromString(s string) (*byte, error) { return &a[0], nil } +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. +func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + var s []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(s) +} + // Single-word zero for use when we need a valid pointer to 0 bytes. // See mksyscall.pl. 
var _zero uintptr diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 62cf70e9f..008ffc11a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -92,11 +92,11 @@ func UTF16FromString(s string) ([]uint16, error) { } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, -// with a terminating NUL removed. +// with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { for i, v := range s { if v == 0 { - s = s[0:i] + s = s[:i] break } } @@ -120,7 +120,7 @@ func UTF16PtrFromString(s string) (*uint16, error) { } // UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. -// If the pointer is nil, this returns the empty string. This assumes that the UTF-16 sequence is terminated +// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated // at a zero word; if the zero word is not present, the program may crash. func UTF16PtrToString(p *uint16) string { if p == nil { @@ -259,6 +259,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore //sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore //sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore //sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain //sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain //sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext @@ -270,9 +271,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = 
kernel32.ReadConsoleW //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot @@ -303,6 +306,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread //sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass //sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) @@ -347,6 +351,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys GetFinalPathNameByHandleW(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses @@ -387,11 +392,7 @@ func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) proc = uintptr(r0) if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -1088,11 +1089,7 @@ func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlap } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return err } @@ -1104,11 +1101,7 @@ func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overl } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return err } @@ -1487,3 +1480,7 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf return languages, nil } } + +func SetConsoleCursorPosition(console Handle, position Coord) error { + return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 
809fff0b4..da1652e74 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1584,18 +1584,6 @@ const ( JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 ) -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - type IO_COUNTERS struct { ReadOperationCount uint64 WriteOperationCount uint64 diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go index fe0ddd031..8bce3e2fc 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_386.go +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -20,3 +20,16 @@ type Servent struct { Port uint16 Proto *byte } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go index 7e154c2df..fdddc0c70 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -20,3 +20,15 @@ type Servent struct { Proto *byte Port uint16 } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go index 74571e360..321872c3e 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_arm.go +++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go @@ -20,3 +20,16 @@ type Servent struct { Port uint16 Proto *byte } + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8a562feed..d400c3512 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -17,6 +17,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -24,7 +25,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -36,2065 +37,1605 @@ func errnoErr(e syscall.Errno) error { var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") - modshell32 = 
NewLazySystemDLL("shell32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") - modcrypt32 = NewLazySystemDLL("crypt32.dll") - moduser32 = NewLazySystemDLL("user32.dll") - modole32 = NewLazySystemDLL("ole32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") modntdll = NewLazySystemDLL("ntdll.dll") + modole32 = NewLazySystemDLL("ole32.dll") modpsapi = NewLazySystemDLL("psapi.dll") - modws2_32 = NewLazySystemDLL("ws2_32.dll") - moddnsapi = NewLazySystemDLL("dnsapi.dll") - modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") - modnetapi32 = NewLazySystemDLL("netapi32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") - procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") - procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") - procReportEventW = modadvapi32.NewProc("ReportEventW") - procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procControlService = modadvapi32.NewProc("ControlService") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procCopySid = modadvapi32.NewProc("CopySid") procCreateServiceW = modadvapi32.NewProc("CreateServiceW") - procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") procDeleteService = modadvapi32.NewProc("DeleteService") - procStartServiceW = modadvapi32.NewProc("StartServiceW") - procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") - procControlService = modadvapi32.NewProc("ControlService") - procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") - procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") - procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") - procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") - procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") - procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procDuplicateTokenEx = 
modadvapi32.NewProc("DuplicateTokenEx") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") - procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procEqualSid = modadvapi32.NewProc("EqualSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") - procGetLastError = modkernel32.NewProc("GetLastError") - procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") - procGetVersion = modkernel32.NewProc("GetVersion") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procReadFile = modkernel32.NewProc("ReadFile") - procWriteFile = modkernel32.NewProc("WriteFile") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") - procCloseHandle = modkernel32.NewProc("CloseHandle") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindClose = modkernel32.NewProc("FindClose") - 
procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") - procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procOpenProcess = modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") - procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procGetFileType = modkernel32.NewProc("GetFileType") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") - procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") - procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") - procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procLocalFree = 
modkernel32.NewProc("LocalFree") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") - procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") - procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procTransmitFile = modmswsock.NewProc("TransmitFile") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procStartServiceW = modadvapi32.NewProc("StartServiceW") procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") procCertCloseStore = 
modcrypt32.NewProc("CertCloseStore") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procReadConsoleW = modkernel32.NewProc("ReadConsoleW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procCreateEventW = modkernel32.NewProc("CreateEventW") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") - procCreateMutexW = modkernel32.NewProc("CreateMutexW") - procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") - procOpenMutexW = modkernel32.NewProc("OpenMutexW") - procReleaseMutex = modkernel32.NewProc("ReleaseMutex") - procSleepEx = modkernel32.NewProc("SleepEx") + procCreateEventW = modkernel32.NewProc("CreateEventW") + 
procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") - procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") - procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") - procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procExitProcess = modkernel32.NewProc("ExitProcess") + procFindClose = modkernel32.NewProc("FindClose") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetACP = modkernel32.NewProc("GetACP") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procGetCurrentDirectoryW = 
modkernel32.NewProc("GetCurrentDirectoryW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") + procGetVersion = modkernel32.NewProc("GetVersion") procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") procGetVolumeNameForVolumeMountPointW = 
modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLocalFree = modkernel32.NewProc("LocalFree") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procOpenThread = modkernel32.NewProc("OpenThread") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") + procPulseEvent = modkernel32.NewProc("PulseEvent") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procReadFile = modkernel32.NewProc("ReadFile") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") - procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") - procSetProcessShutdownParameters = 
modkernel32.NewProc("SetProcessShutdownParameters") - procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procSleepEx = modkernel32.NewProc("SleepEx") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procWriteFile = modkernel32.NewProc("WriteFile") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procCoCreateGuid = modole32.NewProc("CoCreateGuid") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procRtlGetVersion = modntdll.NewProc("RtlGetVersion") - procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") - procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") - procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") - procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") - procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procEnumProcesses = modpsapi.NewProc("EnumProcesses") - procWSAStartup = modws2_32.NewProc("WSAStartup") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procsocket = modws2_32.NewProc("socket") - procsendto = modws2_32.NewProc("sendto") - procrecvfrom = 
modws2_32.NewProc("recvfrom") - procsetsockopt = modws2_32.NewProc("setsockopt") - procgetsockopt = modws2_32.NewProc("getsockopt") - procbind = modws2_32.NewProc("bind") - procconnect = modws2_32.NewProc("connect") - procgetsockname = modws2_32.NewProc("getsockname") - procgetpeername = modws2_32.NewProc("getpeername") - proclisten = modws2_32.NewProc("listen") - procshutdown = modws2_32.NewProc("shutdown") - procclosesocket = modws2_32.NewProc("closesocket") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") procWSARecv = modws2_32.NewProc("WSARecv") - procWSASend = modws2_32.NewProc("WSASend") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASend = modws2_32.NewProc("WSASend") procWSASendTo = modws2_32.NewProc("WSASendTo") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procbind = modws2_32.NewProc("bind") + procclosesocket = modws2_32.NewProc("closesocket") + procconnect = modws2_32.NewProc("connect") procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetpeername = modws2_32.NewProc("getpeername") + procgetprotobyname = modws2_32.NewProc("getprotobyname") procgetservbyname = modws2_32.NewProc("getservbyname") + procgetsockname = modws2_32.NewProc("getsockname") + procgetsockopt = modws2_32.NewProc("getsockopt") + proclisten = modws2_32.NewProc("listen") procntohs = modws2_32.NewProc("ntohs") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") - procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procCopySid = modadvapi32.NewProc("CopySid") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - 
procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") - procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") - procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") - procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") - procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") - procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") - procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") - procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") - procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") - procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") - procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") - procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") - procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") - procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") - procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") - procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") - procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") - procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procrecvfrom = modws2_32.NewProc("recvfrom") + procsendto = modws2_32.NewProc("sendto") + procsetsockopt = 
modws2_32.NewProc("setsockopt") + procshutdown = modws2_32.NewProc("shutdown") + procsocket = modws2_32.NewProc("socket") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) -func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor 
*SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + err = errnoErr(e1) } return } -func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := 
syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return } - return + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) } -func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, 
e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), 
uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) - if r0 != 0 { - lasterr = syscall.Errno(r0) +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibrary(libname string) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + err = errnoErr(e1) } - 
return _LoadLibrary(_p0) + return } -func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return } -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) } return } -func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) return } -func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(procname) - if err != nil { +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { return } - return _GetProcAddress(module, _p0) -} - -func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) } -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - 
} +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) - ver = uint32(r0) - if ver == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 } - return -} - -func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { - var _p0 *uint16 - if len(buf) > 0 { - _p0 = &buf[0] + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) } return } -func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) - return -} - -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { var _p0 uint32 - if *isWow64 { + if *groupDefaulted { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) - *isWow64 = _p0 != 0 + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, 
uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) return -} - -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { var _p0 uint32 - if wait { + if *saclPresent { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) - newlowoffset = uint32(r0) - if newlowoffset == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) 
(ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } -func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) return } -func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) return } -func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - 
r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 return } -func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 return } -func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 return } -func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), 
uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := 
syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) - rc = uint32(r0) - if rc == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), 
uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity 
*SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + return +} + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + err = errnoErr(e1) } return 
} -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) if r0 != 0 { ret = syscall.Errno(r0) } return } -func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), 
uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { var _p0 uint32 - if bInheritHandle { + if ownerDefaulted { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) return } -func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { var _p0 uint32 - if waitAll { + if saclPresent { _p0 = 1 - } else { - _p0 = 0 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { + syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), 
uintptr(unsafe.Pointer(sacl)), 0, 0) return } -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - envs = (*uint16)(unsafe.Pointer(r0)) - if envs == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + err = 
errnoErr(e1) } return } -func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) +func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), 
uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) } return } -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) } return } -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = 
(*uint16)(unsafe.Pointer(r0)) +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := 
syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), 
uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := 
syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { var _p0 uint32 - if watchSubTree { + if inheritHandles { _p0 = 1 - } else { - _p0 = 0 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) handle = Handle(r0) if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = 
(*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + err = errnoErr(e1) } return } -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), 
uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return } -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func findNextFile1(handle Handle, data 
*win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, 
reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, 
uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) return } @@ -2104,226 +1645,166 @@ func GetCurrentThreadId() (id uint32) { return } -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if 
r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + err = errnoErr(e1) } return } -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + err = errnoErr(e1) } return } -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), 
uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFinalPathNameByHandleW(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } else { - _p0 = 0 +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) return } -func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func AssignProcessToJobObject(job Handle, process Handle) 
(err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + err = errnoErr(e1) } return } -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } return } -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -2332,36 +1813,25 @@ func GetPriorityClass(process Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) ret = uint32(r0) if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return } - return + return 
_GetProcAddress(module, _p0) } -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) } return } @@ -2370,1701 +1840,1236 @@ func GetProcessId(process Handle) (id uint32, err error) { r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) id = uint32(r0) if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) - return -} - -func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) +func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, 
uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { + syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) return } -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetStdHandle(stdhandle uint32) 
(handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) +func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) return } -func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) return } -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), 
uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) +func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) return } -func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + err = errnoErr(e1) } return } -func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) +func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := 
syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + err = errnoErr(e1) } return } -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 
0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return } - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return + return _LoadLibraryEx(_p0, zero, flags) } -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, 
uintptr(address), 0, 0) - return +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) } -func rtlGetVersion(info *OsVersionInfoEx) (ret error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + err = errnoErr(e1) + } return } -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) } return } -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), 
uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) } return } -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := 
syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + if handle == 0 { + err = errnoErr(e1) } return } -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == 
socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, 
uintptr(mutex), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + err = errnoErr(e1) } return } -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func setConsoleCursorPosition(console Handle, position uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + 
err = errnoErr(e1) } return } -func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } - return _GetHostByName(_p0) + return } -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } - return _GetServByName(_p0, _p1) + return } -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := 
syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + err = errnoErr(e1) } - return _GetProtoByName(_p0) + return } -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) + return } -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := 
syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) return } -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 
:= syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) - if r0 != 0 { - neterr = syscall.Errno(r0) +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + err = errnoErr(e1) } return } -func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func 
VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + err = errnoErr(e1) } return } -func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) } return } -func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - len = uint32(r0) +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } return } -func CopySid(destSidLen uint32, destSid *SID, srcSid 
*SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) return } -func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, 
uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) - isEqual = r0 != 0 +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } -func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } return } -func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := 
syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) return } -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) return } -func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { + var _p0 *uint32 + if len(processIds) > 0 { + _p0 = &processIds[0] + } + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err 
= syscall.EINVAL - } +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + err = errnoErr(e1) } return } -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + err = errnoErr(e1) } return } -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), 
uintptr(unsafe.Pointer(dirLen))) +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) return } -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) } return } -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), 
uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } return } -func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { - syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) + return } -func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) 
+func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) + return } -func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { - ret = syscall.Errno(r0) + sockerr = syscall.Errno(r0) } return } -func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) - if r0 != 0 { - ret = syscall.Errno(r0) +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func 
getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { - var _p0 uint32 - if *daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *daclPresent = _p0 != 0 - *daclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetHostByName(_p0) } -func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { - var _p0 uint32 - if *saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *saclPresent = _p0 != 0 - *saclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + err = errnoErr(e1) } return } -func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { - var _p0 uint32 - if *ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) - *ownerDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { - var _p0 uint32 - if *groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) - *groupDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + 
_p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetProtoByName(_p0) } -func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - len = uint32(r0) +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + err = errnoErr(e1) + } return } -func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) } -func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - isValid = r0 != 0 +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + err = errnoErr(e1) + } return } -func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { - var _p0 uint32 - if daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { - var _p0 uint32 - if saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), 
uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { - var _p0 uint32 - if ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) return } -func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { - var _p0 uint32 - if groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) } return } -func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + err = errnoErr(e1) } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == 
socket_error { + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) return } -func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 7ffa36512..647f2d427 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.14 +// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go new file mode 100644 index 000000000..c937d0976 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -0,0 +1,1955 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.16 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 17408 bytes (17.00 KiB). Checksum: df85fcbfe9b8377f. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 248 blocks, 15872 entries, 15872 bytes +// The third block is the zero block. 
+var bidiValues = [15872]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x895: 0x000c, 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, 
+ 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa81: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaca: 0x000c, + 0xad2: 0x000c, 0xad3: 0x000c, 0xad4: 0x000c, 0xad6: 0x000c, + // Block 0x2c, offset 0xb00 + 0xb31: 0x000c, 0xb34: 0x000c, 0xb35: 0x000c, + 0xb36: 0x000c, 0xb37: 0x000c, 0xb38: 0x000c, 0xb39: 0x000c, 0xb3a: 0x000c, + 0xb3f: 0x0004, + // Block 0x2d, offset 0xb40 + 0xb47: 0x000c, 0xb48: 0x000c, 0xb49: 0x000c, 0xb4a: 0x000c, 0xb4b: 0x000c, + 0xb4c: 0x000c, 0xb4d: 0x000c, 0xb4e: 0x000c, + // Block 0x2e, offset 0xb80 + 0xbb1: 0x000c, 0xbb4: 0x000c, 0xbb5: 0x000c, + 0xbb6: 0x000c, 0xbb7: 0x000c, 0xbb8: 0x000c, 0xbb9: 0x000c, 0xbba: 0x000c, 0xbbb: 0x000c, + 0xbbc: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbc8: 0x000c, 0xbc9: 0x000c, 0xbca: 0x000c, 0xbcb: 0x000c, + 0xbcc: 0x000c, 0xbcd: 0x000c, + // Block 0x30, offset 0xc00 + 0xc18: 0x000c, 0xc19: 0x000c, + 0xc35: 0x000c, + 0xc37: 0x000c, 0xc39: 0x000c, 0xc3a: 0x003a, 0xc3b: 0x002a, + 0xc3c: 0x003a, 0xc3d: 0x002a, + // Block 0x31, offset 0xc40 + 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, 0xc7d: 0x000c, 0xc7e: 0x000c, + // Block 0x32, offset 0xc80 + 0xc80: 0x000c, 0xc81: 0x000c, 0xc82: 0x000c, 0xc83: 0x000c, 0xc84: 0x000c, + 0xc86: 0x000c, 0xc87: 0x000c, + 0xc8d: 0x000c, 0xc8e: 0x000c, 0xc8f: 0x000c, 0xc90: 0x000c, 0xc91: 0x000c, + 0xc92: 0x000c, 0xc93: 0x000c, 0xc94: 0x000c, 0xc95: 0x000c, 0xc96: 0x000c, 0xc97: 0x000c, + 0xc99: 0x000c, 0xc9a: 0x000c, 0xc9b: 0x000c, 0xc9c: 0x000c, 0xc9d: 0x000c, + 0xc9e: 0x000c, 0xc9f: 0x000c, 0xca0: 0x000c, 0xca1: 0x000c, 0xca2: 0x000c, 0xca3: 0x000c, + 0xca4: 0x000c, 0xca5: 0x000c, 0xca6: 0x000c, 0xca7: 0x000c, 0xca8: 0x000c, 0xca9: 0x000c, + 0xcaa: 0x000c, 0xcab: 0x000c, 0xcac: 0x000c, 0xcad: 0x000c, 0xcae: 0x000c, 0xcaf: 0x000c, + 0xcb0: 0x000c, 0xcb1: 0x000c, 0xcb2: 0x000c, 0xcb3: 0x000c, 0xcb4: 0x000c, 0xcb5: 0x000c, + 0xcb6: 0x000c, 0xcb7: 0x000c, 0xcb8: 0x000c, 0xcb9: 0x000c, 0xcba: 0x000c, 0xcbb: 0x000c, + 0xcbc: 0x000c, + // Block 0x33, offset 0xcc0 + 0xcc6: 0x000c, + // Block 0x34, offset 0xd00 + 0xd2d: 0x000c, 0xd2e: 0x000c, 0xd2f: 0x000c, + 0xd30: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, 0xd35: 0x000c, + 0xd36: 0x000c, 0xd37: 0x000c, 0xd39: 0x000c, 0xd3a: 0x000c, + 0xd3d: 0x000c, 0xd3e: 0x000c, + // Block 0x35, offset 0xd40 + 0xd58: 0x000c, 0xd59: 0x000c, + 0xd5e: 0x000c, 0xd5f: 0x000c, 0xd60: 0x000c, + 0xd71: 0x000c, 0xd72: 0x000c, 0xd73: 0x000c, 0xd74: 0x000c, + // Block 0x36, offset 0xd80 + 0xd82: 0x000c, 0xd85: 0x000c, + 0xd86: 0x000c, + 0xd8d: 0x000c, + 0xd9d: 0x000c, + // Block 0x37, offset 0xdc0 + 0xddd: 0x000c, + 0xdde: 0x000c, 0xddf: 0x000c, + // Block 0x38, offset 0xe00 + 0xe10: 0x000a, 0xe11: 0x000a, + 0xe12: 0x000a, 0xe13: 0x000a, 0xe14: 0x000a, 0xe15: 0x000a, 0xe16: 0x000a, 0xe17: 0x000a, + 0xe18: 0x000a, 0xe19: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0009, + 0xe9b: 0x007a, 0xe9c: 0x006a, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, 0xed4: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, 0xef4: 0x000c, + // Block 
0x3c, offset 0xf00 + 0xf12: 0x000c, 0xf13: 0x000c, + 0xf32: 0x000c, 0xf33: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf74: 0x000c, 0xf75: 0x000c, + 0xf77: 0x000c, 0xf78: 0x000c, 0xf79: 0x000c, 0xf7a: 0x000c, 0xf7b: 0x000c, + 0xf7c: 0x000c, 0xf7d: 0x000c, + // Block 0x3e, offset 0xf80 + 0xf86: 0x000c, 0xf89: 0x000c, 0xf8a: 0x000c, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000c, 0xf8f: 0x000c, 0xf90: 0x000c, 0xf91: 0x000c, + 0xf92: 0x000c, 0xf93: 0x000c, + 0xf9b: 0x0004, 0xf9d: 0x000c, + 0xfb0: 0x000a, 0xfb1: 0x000a, 0xfb2: 0x000a, 0xfb3: 0x000a, 0xfb4: 0x000a, 0xfb5: 0x000a, + 0xfb6: 0x000a, 0xfb7: 0x000a, 0xfb8: 0x000a, 0xfb9: 0x000a, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x000a, 0xfc1: 0x000a, 0xfc2: 0x000a, 0xfc3: 0x000a, 0xfc4: 0x000a, 0xfc5: 0x000a, + 0xfc6: 0x000a, 0xfc7: 0x000a, 0xfc8: 0x000a, 0xfc9: 0x000a, 0xfca: 0x000a, 0xfcb: 0x000c, + 0xfcc: 0x000c, 0xfcd: 0x000c, 0xfce: 0x000b, + // Block 0x40, offset 0x1000 + 0x1005: 0x000c, + 0x1006: 0x000c, + 0x1029: 0x000c, + // Block 0x41, offset 0x1040 + 0x1060: 0x000c, 0x1061: 0x000c, 0x1062: 0x000c, + 0x1067: 0x000c, 0x1068: 0x000c, + 0x1072: 0x000c, + 0x1079: 0x000c, 0x107a: 0x000c, 0x107b: 0x000c, + // Block 0x42, offset 0x1080 + 0x1080: 0x000a, 0x1084: 0x000a, 0x1085: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10de: 0x000a, 0x10df: 0x000a, 0x10e0: 0x000a, 0x10e1: 0x000a, 0x10e2: 0x000a, 0x10e3: 0x000a, + 0x10e4: 0x000a, 0x10e5: 0x000a, 0x10e6: 0x000a, 0x10e7: 0x000a, 0x10e8: 0x000a, 0x10e9: 0x000a, + 0x10ea: 0x000a, 0x10eb: 0x000a, 0x10ec: 0x000a, 0x10ed: 0x000a, 0x10ee: 0x000a, 0x10ef: 0x000a, + 0x10f0: 0x000a, 0x10f1: 0x000a, 0x10f2: 0x000a, 0x10f3: 0x000a, 0x10f4: 0x000a, 0x10f5: 0x000a, + 0x10f6: 0x000a, 0x10f7: 0x000a, 0x10f8: 0x000a, 0x10f9: 0x000a, 0x10fa: 0x000a, 0x10fb: 0x000a, + 0x10fc: 0x000a, 0x10fd: 0x000a, 0x10fe: 0x000a, 0x10ff: 0x000a, + // Block 0x44, offset 0x1100 + 0x1117: 0x000c, + 0x1118: 0x000c, 0x111b: 0x000c, + // Block 0x45, offset 0x1140 + 0x1156: 0x000c, + 0x1158: 0x000c, 0x1159: 0x000c, 0x115a: 0x000c, 0x115b: 0x000c, 0x115c: 0x000c, 0x115d: 0x000c, + 0x115e: 0x000c, 0x1160: 0x000c, 0x1162: 0x000c, + 0x1165: 0x000c, 0x1166: 0x000c, 0x1167: 0x000c, 0x1168: 0x000c, 0x1169: 0x000c, + 0x116a: 0x000c, 0x116b: 0x000c, 0x116c: 0x000c, + 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117f: 0x000c, + // Block 0x46, offset 0x1180 + 0x11b0: 0x000c, 0x11b1: 0x000c, 0x11b2: 0x000c, 0x11b3: 0x000c, 0x11b4: 0x000c, 0x11b5: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bb: 0x000c, + 0x11bc: 0x000c, 0x11bd: 0x000c, 0x11be: 0x000c, 0x11bf: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, 0x1202: 0x000c, 0x1203: 0x000c, + 0x1234: 0x000c, + 0x1236: 0x000c, 0x1237: 0x000c, 0x1238: 0x000c, 0x1239: 0x000c, 0x123a: 0x000c, + 0x123c: 0x000c, + // Block 0x49, offset 0x1240 + 0x1242: 0x000c, + 0x126b: 0x000c, 0x126c: 0x000c, 0x126d: 0x000c, 0x126e: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, 0x1272: 0x000c, 0x1273: 0x000c, + // Block 0x4a, offset 0x1280 + 0x1280: 0x000c, 0x1281: 0x000c, + 0x12a2: 0x000c, 0x12a3: 0x000c, + 0x12a4: 0x000c, 0x12a5: 0x000c, 0x12a8: 0x000c, 0x12a9: 0x000c, + 0x12ab: 0x000c, 0x12ac: 0x000c, 0x12ad: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12e6: 0x000c, 0x12e8: 0x000c, 0x12e9: 0x000c, + 0x12ed: 0x000c, 0x12ef: 0x000c, + 0x12f0: 0x000c, 
0x12f1: 0x000c, + // Block 0x4c, offset 0x1300 + 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, + // Block 0x4d, offset 0x1340 + 0x1350: 0x000c, 0x1351: 0x000c, + 0x1352: 0x000c, 0x1354: 0x000c, 0x1355: 0x000c, 0x1356: 0x000c, 0x1357: 0x000c, + 0x1358: 0x000c, 0x1359: 0x000c, 0x135a: 0x000c, 0x135b: 0x000c, 0x135c: 0x000c, 0x135d: 0x000c, + 0x135e: 0x000c, 0x135f: 0x000c, 0x1360: 0x000c, 0x1362: 0x000c, 0x1363: 0x000c, + 0x1364: 0x000c, 0x1365: 0x000c, 0x1366: 0x000c, 0x1367: 0x000c, 0x1368: 0x000c, + 0x136d: 0x000c, + 0x1374: 0x000c, + 0x1378: 0x000c, 0x1379: 0x000c, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000c, 0x1381: 0x000c, 0x1382: 0x000c, 0x1383: 0x000c, 0x1384: 0x000c, 0x1385: 0x000c, + 0x1386: 0x000c, 0x1387: 0x000c, 0x1388: 0x000c, 0x1389: 0x000c, 0x138a: 0x000c, 0x138b: 0x000c, + 0x138c: 0x000c, 0x138d: 0x000c, 0x138e: 0x000c, 0x138f: 0x000c, 0x1390: 0x000c, 0x1391: 0x000c, + 0x1392: 0x000c, 0x1393: 0x000c, 0x1394: 0x000c, 0x1395: 0x000c, 0x1396: 0x000c, 0x1397: 0x000c, + 0x1398: 0x000c, 0x1399: 0x000c, 0x139a: 0x000c, 0x139b: 0x000c, 0x139c: 0x000c, 0x139d: 0x000c, + 0x139e: 0x000c, 0x139f: 0x000c, 0x13a0: 0x000c, 0x13a1: 0x000c, 0x13a2: 0x000c, 0x13a3: 0x000c, + 0x13a4: 0x000c, 0x13a5: 0x000c, 0x13a6: 0x000c, 0x13a7: 0x000c, 0x13a8: 0x000c, 0x13a9: 0x000c, + 0x13aa: 0x000c, 0x13ab: 0x000c, 0x13ac: 0x000c, 0x13ad: 0x000c, 0x13ae: 0x000c, 0x13af: 0x000c, + 0x13b0: 0x000c, 0x13b1: 0x000c, 0x13b2: 0x000c, 0x13b3: 0x000c, 0x13b4: 0x000c, 0x13b5: 0x000c, + 0x13b6: 0x000c, 0x13b7: 0x000c, 0x13b8: 0x000c, 0x13b9: 0x000c, 0x13bb: 0x000c, + 0x13bc: 0x000c, 0x13bd: 0x000c, 0x13be: 0x000c, 0x13bf: 0x000c, + // Block 0x4f, offset 0x13c0 + 0x13fd: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, + 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, + 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x000a, + 0x142d: 0x000a, 0x142e: 0x000a, 0x142f: 0x000a, + 0x143d: 0x000a, 0x143e: 0x000a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0009, 0x1441: 0x0009, 0x1442: 0x0009, 0x1443: 0x0009, 0x1444: 0x0009, 0x1445: 0x0009, + 0x1446: 0x0009, 0x1447: 0x0009, 0x1448: 0x0009, 0x1449: 0x0009, 0x144a: 0x0009, 0x144b: 0x000b, + 0x144c: 0x000b, 0x144d: 0x000b, 0x144f: 0x0001, 0x1450: 0x000a, 0x1451: 0x000a, + 0x1452: 0x000a, 0x1453: 0x000a, 0x1454: 0x000a, 0x1455: 0x000a, 0x1456: 0x000a, 0x1457: 0x000a, + 0x1458: 0x000a, 0x1459: 0x000a, 0x145a: 0x000a, 0x145b: 0x000a, 0x145c: 0x000a, 0x145d: 0x000a, + 0x145e: 0x000a, 0x145f: 0x000a, 0x1460: 0x000a, 0x1461: 0x000a, 0x1462: 0x000a, 0x1463: 0x000a, + 0x1464: 0x000a, 0x1465: 0x000a, 0x1466: 0x000a, 0x1467: 0x000a, 0x1468: 0x0009, 0x1469: 0x0007, + 0x146a: 0x000e, 0x146b: 0x000e, 0x146c: 0x000e, 0x146d: 0x000e, 0x146e: 0x000e, 0x146f: 0x0006, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x000a, + 0x1476: 0x000a, 0x1477: 0x000a, 0x1478: 0x000a, 0x1479: 0x000a, 0x147a: 0x000a, 0x147b: 0x000a, + 0x147c: 0x000a, 0x147d: 0x000a, 0x147e: 0x000a, 0x147f: 0x000a, + // Block 0x52, offset 0x1480 + 0x1480: 0x000a, 0x1481: 0x000a, 0x1482: 0x000a, 0x1483: 0x000a, 0x1484: 0x0006, 0x1485: 0x009a, + 0x1486: 0x008a, 0x1487: 0x000a, 0x1488: 0x000a, 0x1489: 0x000a, 0x148a: 0x000a, 0x148b: 0x000a, + 0x148c: 0x000a, 0x148d: 0x000a, 0x148e: 0x000a, 0x148f: 0x000a, 0x1490: 0x000a, 0x1491: 0x000a, + 0x1492: 0x000a, 0x1493: 0x000a, 0x1494: 0x000a, 0x1495: 0x000a, 0x1496: 0x000a, 0x1497: 0x000a, + 
0x1498: 0x000a, 0x1499: 0x000a, 0x149a: 0x000a, 0x149b: 0x000a, 0x149c: 0x000a, 0x149d: 0x000a, + 0x149e: 0x000a, 0x149f: 0x0009, 0x14a0: 0x000b, 0x14a1: 0x000b, 0x14a2: 0x000b, 0x14a3: 0x000b, + 0x14a4: 0x000b, 0x14a5: 0x000b, 0x14a6: 0x000e, 0x14a7: 0x000e, 0x14a8: 0x000e, 0x14a9: 0x000e, + 0x14aa: 0x000b, 0x14ab: 0x000b, 0x14ac: 0x000b, 0x14ad: 0x000b, 0x14ae: 0x000b, 0x14af: 0x000b, + 0x14b0: 0x0002, 0x14b4: 0x0002, 0x14b5: 0x0002, + 0x14b6: 0x0002, 0x14b7: 0x0002, 0x14b8: 0x0002, 0x14b9: 0x0002, 0x14ba: 0x0003, 0x14bb: 0x0003, + 0x14bc: 0x000a, 0x14bd: 0x009a, 0x14be: 0x008a, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0002, 0x14c1: 0x0002, 0x14c2: 0x0002, 0x14c3: 0x0002, 0x14c4: 0x0002, 0x14c5: 0x0002, + 0x14c6: 0x0002, 0x14c7: 0x0002, 0x14c8: 0x0002, 0x14c9: 0x0002, 0x14ca: 0x0003, 0x14cb: 0x0003, + 0x14cc: 0x000a, 0x14cd: 0x009a, 0x14ce: 0x008a, + 0x14e0: 0x0004, 0x14e1: 0x0004, 0x14e2: 0x0004, 0x14e3: 0x0004, + 0x14e4: 0x0004, 0x14e5: 0x0004, 0x14e6: 0x0004, 0x14e7: 0x0004, 0x14e8: 0x0004, 0x14e9: 0x0004, + 0x14ea: 0x0004, 0x14eb: 0x0004, 0x14ec: 0x0004, 0x14ed: 0x0004, 0x14ee: 0x0004, 0x14ef: 0x0004, + 0x14f0: 0x0004, 0x14f1: 0x0004, 0x14f2: 0x0004, 0x14f3: 0x0004, 0x14f4: 0x0004, 0x14f5: 0x0004, + 0x14f6: 0x0004, 0x14f7: 0x0004, 0x14f8: 0x0004, 0x14f9: 0x0004, 0x14fa: 0x0004, 0x14fb: 0x0004, + 0x14fc: 0x0004, 0x14fd: 0x0004, 0x14fe: 0x0004, 0x14ff: 0x0004, + // Block 0x54, offset 0x1500 + 0x1500: 0x0004, 0x1501: 0x0004, 0x1502: 0x0004, 0x1503: 0x0004, 0x1504: 0x0004, 0x1505: 0x0004, + 0x1506: 0x0004, 0x1507: 0x0004, 0x1508: 0x0004, 0x1509: 0x0004, 0x150a: 0x0004, 0x150b: 0x0004, + 0x150c: 0x0004, 0x150d: 0x0004, 0x150e: 0x0004, 0x150f: 0x0004, 0x1510: 0x000c, 0x1511: 0x000c, + 0x1512: 0x000c, 0x1513: 0x000c, 0x1514: 0x000c, 0x1515: 0x000c, 0x1516: 0x000c, 0x1517: 0x000c, + 0x1518: 0x000c, 0x1519: 0x000c, 0x151a: 0x000c, 0x151b: 0x000c, 0x151c: 0x000c, 0x151d: 0x000c, + 0x151e: 0x000c, 0x151f: 0x000c, 0x1520: 0x000c, 0x1521: 0x000c, 0x1522: 0x000c, 0x1523: 0x000c, + 0x1524: 0x000c, 0x1525: 0x000c, 0x1526: 0x000c, 0x1527: 0x000c, 0x1528: 0x000c, 0x1529: 0x000c, + 0x152a: 0x000c, 0x152b: 0x000c, 0x152c: 0x000c, 0x152d: 0x000c, 0x152e: 0x000c, 0x152f: 0x000c, + 0x1530: 0x000c, + // Block 0x55, offset 0x1540 + 0x1540: 0x000a, 0x1541: 0x000a, 0x1543: 0x000a, 0x1544: 0x000a, 0x1545: 0x000a, + 0x1546: 0x000a, 0x1548: 0x000a, 0x1549: 0x000a, + 0x1554: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1565: 0x000a, 0x1567: 0x000a, 0x1569: 0x000a, + 0x156e: 0x0004, + 0x157a: 0x000a, 0x157b: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, + 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x000a, 0x15d3: 0x000a, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 
0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x000a, 0x1609: 0x000a, 0x160a: 0x000a, 0x160b: 0x000a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x000a, + 0x162a: 0x000a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + 0x1636: 0x000a, 0x1637: 0x000a, 0x1638: 0x000a, 0x1639: 0x000a, 0x163a: 0x000a, 0x163b: 0x000a, + 0x163c: 0x000a, 0x163d: 0x000a, 0x163e: 0x000a, 0x163f: 0x000a, + // Block 0x59, offset 0x1640 + 0x1640: 0x000a, 0x1641: 0x000a, 0x1642: 0x000a, 0x1643: 0x000a, 0x1644: 0x000a, 0x1645: 0x000a, + 0x1646: 0x000a, 0x1647: 0x000a, 0x1648: 0x000a, 0x1649: 0x000a, 0x164a: 0x000a, 0x164b: 0x000a, + 0x164c: 0x000a, 0x164d: 0x000a, 0x164e: 0x000a, 0x164f: 0x000a, 0x1650: 0x000a, 0x1651: 0x000a, + 0x1652: 0x0003, 0x1653: 0x0004, 0x1654: 0x000a, 0x1655: 0x000a, 0x1656: 0x000a, 0x1657: 0x000a, + 0x1658: 0x000a, 0x1659: 0x000a, 0x165a: 0x000a, 0x165b: 0x000a, 0x165c: 0x000a, 0x165d: 0x000a, + 0x165e: 0x000a, 0x165f: 0x000a, 0x1660: 0x000a, 0x1661: 0x000a, 0x1662: 0x000a, 0x1663: 0x000a, + 0x1664: 0x000a, 0x1665: 0x000a, 0x1666: 0x000a, 0x1667: 0x000a, 0x1668: 0x000a, 0x1669: 0x000a, + 0x166a: 0x000a, 0x166b: 0x000a, 0x166c: 0x000a, 0x166d: 0x000a, 0x166e: 0x000a, 0x166f: 0x000a, + 0x1670: 0x000a, 0x1671: 0x000a, 0x1672: 0x000a, 0x1673: 0x000a, 0x1674: 0x000a, 0x1675: 0x000a, + 0x1676: 0x000a, 0x1677: 0x000a, 0x1678: 0x000a, 0x1679: 0x000a, 0x167a: 0x000a, 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x003a, 0x1689: 0x002a, 0x168a: 0x003a, 0x168b: 0x002a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1695: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x009a, + 0x16aa: 0x008a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 
0x000a, 0x16fe: 0x000a, 0x16ff: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, 0x174b: 0x000a, + 0x174c: 0x000a, 0x174d: 0x000a, 0x174e: 0x000a, 0x174f: 0x000a, 0x1750: 0x000a, 0x1751: 0x000a, + 0x1752: 0x000a, 0x1753: 0x000a, 0x1754: 0x000a, 0x1755: 0x000a, 0x1756: 0x000a, 0x1757: 0x000a, + 0x1758: 0x000a, 0x1759: 0x000a, 0x175a: 0x000a, 0x175b: 0x000a, 0x175c: 0x000a, 0x175d: 0x000a, + 0x175e: 0x000a, 0x175f: 0x000a, 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x000a, 0x1789: 0x000a, 0x178a: 0x000a, + 0x17a0: 0x000a, 0x17a1: 0x000a, 0x17a2: 0x000a, 0x17a3: 0x000a, + 0x17a4: 0x000a, 0x17a5: 0x000a, 0x17a6: 0x000a, 0x17a7: 0x000a, 0x17a8: 0x000a, 0x17a9: 0x000a, + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x0002, 0x17c9: 0x0002, 0x17ca: 0x0002, 0x17cb: 0x0002, + 0x17cc: 0x0002, 0x17cd: 0x0002, 0x17ce: 0x0002, 0x17cf: 0x0002, 0x17d0: 0x0002, 0x17d1: 0x0002, + 0x17d2: 0x0002, 0x17d3: 0x0002, 0x17d4: 0x0002, 0x17d5: 0x0002, 0x17d6: 0x0002, 0x17d7: 0x0002, + 0x17d8: 0x0002, 0x17d9: 0x0002, 0x17da: 0x0002, 0x17db: 0x0002, + // Block 0x60, offset 0x1800 + 0x182a: 0x000a, 0x182b: 0x000a, 0x182c: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 
0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x000a, 0x1869: 0x000a, + 0x186a: 0x000a, 0x186b: 0x000a, 0x186d: 0x000a, 0x186e: 0x000a, 0x186f: 0x000a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x000a, + 0x1886: 0x000a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x003a, 0x18b1: 0x002a, 0x18b2: 0x003a, 0x18b3: 0x002a, 0x18b4: 0x003a, 0x18b5: 0x002a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x000a, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x003a, 0x18e7: 0x002a, 0x18e8: 0x003a, 0x18e9: 0x002a, + 0x18ea: 0x003a, 0x18eb: 0x002a, 0x18ec: 0x003a, 0x18ed: 0x002a, 0x18ee: 0x003a, 0x18ef: 0x002a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x007a, 0x1904: 0x006a, 0x1905: 0x009a, + 0x1906: 0x008a, 0x1907: 0x00ba, 0x1908: 0x00aa, 0x1909: 0x009a, 0x190a: 0x008a, 0x190b: 0x007a, + 0x190c: 0x006a, 0x190d: 0x00da, 0x190e: 0x002a, 0x190f: 0x003a, 0x1910: 0x00ca, 0x1911: 0x009a, + 0x1912: 0x008a, 0x1913: 0x007a, 0x1914: 0x006a, 0x1915: 0x009a, 0x1916: 0x008a, 0x1917: 0x00ba, + 0x1918: 0x00aa, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 
0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x003a, 0x1959: 0x002a, 0x195a: 0x003a, 0x195b: 0x002a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x003a, 0x197d: 0x002a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19c9: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19d2: 0x000a, 0x19d3: 0x000a, 0x19d4: 0x000a, 0x19d5: 0x000a, 0x19d7: 0x000a, + 0x19d8: 0x000a, 0x19d9: 0x000a, 0x19da: 0x000a, 0x19db: 0x000a, 0x19dc: 0x000a, 0x19dd: 0x000a, + 0x19de: 0x000a, 0x19df: 0x000a, 0x19e0: 0x000a, 0x19e1: 0x000a, 0x19e2: 0x000a, 0x19e3: 0x000a, + 0x19e4: 0x000a, 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19eb: 0x000a, 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + 0x19f0: 0x000a, 0x19f1: 0x000a, 0x19f2: 0x000a, 
0x19f3: 0x000a, 0x19f4: 0x000a, 0x19f5: 0x000a, + 0x19f6: 0x000a, 0x19f7: 0x000a, 0x19f8: 0x000a, 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 
0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 
0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, 0x20ac: 0x000c, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + 0x213f: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 
0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, 0x21fd: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x236a: 0x000a, 0x236b: 0x000a, + // Block 0x8e, offset 0x2380 + 0x23a5: 0x000c, 0x23a8: 0x000c, + 0x23ad: 0x000c, + // Block 0x8f, offset 0x23c0 + 0x23dd: 0x0001, + 0x23de: 0x000c, 0x23df: 0x0001, 0x23e0: 0x0001, 0x23e1: 0x0001, 0x23e2: 0x0001, 0x23e3: 0x0001, + 0x23e4: 0x0001, 0x23e5: 0x0001, 0x23e6: 0x0001, 0x23e7: 0x0001, 0x23e8: 0x0001, 0x23e9: 0x0003, + 0x23ea: 0x0001, 0x23eb: 0x0001, 0x23ec: 0x0001, 0x23ed: 0x0001, 0x23ee: 0x0001, 0x23ef: 0x0001, + 0x23f0: 0x0001, 0x23f1: 0x0001, 0x23f2: 0x0001, 0x23f3: 0x0001, 0x23f4: 0x0001, 0x23f5: 0x0001, + 0x23f6: 0x0001, 0x23f7: 0x0001, 0x23f8: 0x0001, 0x23f9: 0x0001, 0x23fa: 0x0001, 0x23fb: 0x0001, + 0x23fc: 0x0001, 0x23fd: 0x0001, 0x23fe: 0x0001, 0x23ff: 0x0001, + // Block 0x90, offset 0x2400 + 0x2400: 0x0001, 0x2401: 0x0001, 0x2402: 0x0001, 0x2403: 0x0001, 0x2404: 0x0001, 0x2405: 0x0001, + 0x2406: 0x0001, 0x2407: 0x0001, 0x2408: 0x0001, 0x2409: 0x0001, 0x240a: 0x0001, 0x240b: 0x0001, + 0x240c: 0x0001, 0x240d: 0x0001, 0x240e: 0x0001, 0x240f: 0x0001, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000d, 0x2451: 0x000d, + 0x2452: 0x000d, 0x2453: 0x000d, 0x2454: 0x000d, 0x2455: 0x000d, 0x2456: 0x000d, 0x2457: 0x000d, + 0x2458: 0x000d, 0x2459: 0x000d, 0x245a: 0x000d, 0x245b: 0x000d, 0x245c: 0x000d, 0x245d: 0x000d, + 0x245e: 0x000d, 0x245f: 0x000d, 0x2460: 0x000d, 0x2461: 0x000d, 0x2462: 0x000d, 0x2463: 0x000d, + 0x2464: 0x000d, 0x2465: 0x000d, 0x2466: 0x000d, 0x2467: 0x000d, 0x2468: 0x000d, 0x2469: 0x000d, + 0x246a: 0x000d, 0x246b: 0x000d, 0x246c: 0x000d, 0x246d: 0x000d, 0x246e: 0x000d, 0x246f: 0x000d, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 
0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000b, 0x2491: 0x000b, + 0x2492: 0x000b, 0x2493: 0x000b, 0x2494: 0x000b, 0x2495: 0x000b, 0x2496: 0x000b, 0x2497: 0x000b, + 0x2498: 0x000b, 0x2499: 0x000b, 0x249a: 0x000b, 0x249b: 0x000b, 0x249c: 0x000b, 0x249d: 0x000b, + 0x249e: 0x000b, 0x249f: 0x000b, 0x24a0: 0x000b, 0x24a1: 0x000b, 0x24a2: 0x000b, 0x24a3: 0x000b, + 0x24a4: 0x000b, 0x24a5: 0x000b, 0x24a6: 0x000b, 0x24a7: 0x000b, 0x24a8: 0x000b, 0x24a9: 0x000b, + 0x24aa: 0x000b, 0x24ab: 0x000b, 0x24ac: 0x000b, 0x24ad: 0x000b, 0x24ae: 0x000b, 0x24af: 0x000b, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000a, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000c, 0x24c1: 0x000c, 0x24c2: 0x000c, 0x24c3: 0x000c, 0x24c4: 0x000c, 0x24c5: 0x000c, + 0x24c6: 0x000c, 0x24c7: 0x000c, 0x24c8: 0x000c, 0x24c9: 0x000c, 0x24ca: 0x000c, 0x24cb: 0x000c, + 0x24cc: 0x000c, 0x24cd: 0x000c, 0x24ce: 0x000c, 0x24cf: 0x000c, 0x24d0: 0x000a, 0x24d1: 0x000a, + 0x24d2: 0x000a, 0x24d3: 0x000a, 0x24d4: 0x000a, 0x24d5: 0x000a, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x000a, + 0x24e0: 0x000c, 0x24e1: 0x000c, 0x24e2: 0x000c, 0x24e3: 0x000c, + 0x24e4: 0x000c, 0x24e5: 0x000c, 0x24e6: 0x000c, 0x24e7: 0x000c, 0x24e8: 0x000c, 0x24e9: 0x000c, + 0x24ea: 0x000c, 0x24eb: 0x000c, 0x24ec: 0x000c, 0x24ed: 0x000c, 0x24ee: 0x000c, 0x24ef: 0x000c, + 0x24f0: 0x000a, 0x24f1: 0x000a, 0x24f2: 0x000a, 0x24f3: 0x000a, 0x24f4: 0x000a, 0x24f5: 0x000a, + 0x24f6: 0x000a, 0x24f7: 0x000a, 0x24f8: 0x000a, 0x24f9: 0x000a, 0x24fa: 0x000a, 0x24fb: 0x000a, + 0x24fc: 0x000a, 0x24fd: 0x000a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x000a, 0x2504: 0x000a, 0x2505: 0x000a, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x000a, 0x2509: 0x000a, 0x250a: 0x000a, 0x250b: 0x000a, + 0x250c: 0x000a, 0x250d: 0x000a, 0x250e: 0x000a, 0x250f: 0x000a, 0x2510: 0x0006, 0x2511: 0x000a, + 0x2512: 0x0006, 0x2514: 0x000a, 0x2515: 0x0006, 0x2516: 0x000a, 0x2517: 0x000a, + 0x2518: 0x000a, 0x2519: 0x009a, 0x251a: 0x008a, 0x251b: 0x007a, 0x251c: 0x006a, 0x251d: 0x009a, + 0x251e: 0x008a, 0x251f: 0x0004, 0x2520: 0x000a, 0x2521: 0x000a, 0x2522: 0x0003, 0x2523: 0x0003, + 0x2524: 0x000a, 0x2525: 0x000a, 0x2526: 0x000a, 0x2528: 0x000a, 0x2529: 0x0004, + 0x252a: 0x0004, 0x252b: 0x000a, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000d, + // Block 0x95, offset 0x2540 + 0x2540: 0x000d, 0x2541: 0x000d, 0x2542: 0x000d, 0x2543: 0x000d, 0x2544: 0x000d, 0x2545: 0x000d, + 0x2546: 0x000d, 0x2547: 0x000d, 0x2548: 0x000d, 0x2549: 0x000d, 0x254a: 0x000d, 0x254b: 0x000d, + 0x254c: 0x000d, 0x254d: 0x000d, 0x254e: 0x000d, 0x254f: 0x000d, 0x2550: 
0x000d, 0x2551: 0x000d, + 0x2552: 0x000d, 0x2553: 0x000d, 0x2554: 0x000d, 0x2555: 0x000d, 0x2556: 0x000d, 0x2557: 0x000d, + 0x2558: 0x000d, 0x2559: 0x000d, 0x255a: 0x000d, 0x255b: 0x000d, 0x255c: 0x000d, 0x255d: 0x000d, + 0x255e: 0x000d, 0x255f: 0x000d, 0x2560: 0x000d, 0x2561: 0x000d, 0x2562: 0x000d, 0x2563: 0x000d, + 0x2564: 0x000d, 0x2565: 0x000d, 0x2566: 0x000d, 0x2567: 0x000d, 0x2568: 0x000d, 0x2569: 0x000d, + 0x256a: 0x000d, 0x256b: 0x000d, 0x256c: 0x000d, 0x256d: 0x000d, 0x256e: 0x000d, 0x256f: 0x000d, + 0x2570: 0x000d, 0x2571: 0x000d, 0x2572: 0x000d, 0x2573: 0x000d, 0x2574: 0x000d, 0x2575: 0x000d, + 0x2576: 0x000d, 0x2577: 0x000d, 0x2578: 0x000d, 0x2579: 0x000d, 0x257a: 0x000d, 0x257b: 0x000d, + 0x257c: 0x000d, 0x257d: 0x000d, 0x257e: 0x000d, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, 0x2582: 0x000a, 0x2583: 0x0004, 0x2584: 0x0004, 0x2585: 0x0004, + 0x2586: 0x000a, 0x2587: 0x000a, 0x2588: 0x003a, 0x2589: 0x002a, 0x258a: 0x000a, 0x258b: 0x0003, + 0x258c: 0x0006, 0x258d: 0x0003, 0x258e: 0x0006, 0x258f: 0x0006, 0x2590: 0x0002, 0x2591: 0x0002, + 0x2592: 0x0002, 0x2593: 0x0002, 0x2594: 0x0002, 0x2595: 0x0002, 0x2596: 0x0002, 0x2597: 0x0002, + 0x2598: 0x0002, 0x2599: 0x0002, 0x259a: 0x0006, 0x259b: 0x000a, 0x259c: 0x000a, 0x259d: 0x000a, + 0x259e: 0x000a, 0x259f: 0x000a, 0x25a0: 0x000a, + 0x25bb: 0x005a, + 0x25bc: 0x000a, 0x25bd: 0x004a, 0x25be: 0x000a, 0x25bf: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, + 0x25db: 0x005a, 0x25dc: 0x000a, 0x25dd: 0x004a, + 0x25de: 0x000a, 0x25df: 0x00fa, 0x25e0: 0x00ea, 0x25e1: 0x000a, 0x25e2: 0x003a, 0x25e3: 0x002a, + 0x25e4: 0x000a, 0x25e5: 0x000a, + // Block 0x98, offset 0x2600 + 0x2620: 0x0004, 0x2621: 0x0004, 0x2622: 0x000a, 0x2623: 0x000a, + 0x2624: 0x000a, 0x2625: 0x0004, 0x2626: 0x0004, 0x2628: 0x000a, 0x2629: 0x000a, + 0x262a: 0x000a, 0x262b: 0x000a, 0x262c: 0x000a, 0x262d: 0x000a, 0x262e: 0x000a, + 0x2630: 0x000b, 0x2631: 0x000b, 0x2632: 0x000b, 0x2633: 0x000b, 0x2634: 0x000b, 0x2635: 0x000b, + 0x2636: 0x000b, 0x2637: 0x000b, 0x2638: 0x000b, 0x2639: 0x000a, 0x263a: 0x000a, 0x263b: 0x000a, + 0x263c: 0x000a, 0x263d: 0x000a, 0x263e: 0x000b, 0x263f: 0x000b, + // Block 0x99, offset 0x2640 + 0x2641: 0x000a, + // Block 0x9a, offset 0x2680 + 0x2680: 0x000a, 0x2681: 0x000a, 0x2682: 0x000a, 0x2683: 0x000a, 0x2684: 0x000a, 0x2685: 0x000a, + 0x2686: 0x000a, 0x2687: 0x000a, 0x2688: 0x000a, 0x2689: 0x000a, 0x268a: 0x000a, 0x268b: 0x000a, + 0x268c: 0x000a, 0x2690: 0x000a, 0x2691: 0x000a, + 0x2692: 0x000a, 0x2693: 0x000a, 0x2694: 0x000a, 0x2695: 0x000a, 0x2696: 0x000a, 0x2697: 0x000a, + 0x2698: 0x000a, 0x2699: 0x000a, 0x269a: 0x000a, 0x269b: 0x000a, 0x269c: 0x000a, + 0x26a0: 0x000a, + // Block 0x9b, offset 0x26c0 + 0x26fd: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2720: 0x000c, 0x2721: 0x0002, 0x2722: 0x0002, 0x2723: 0x0002, + 0x2724: 0x0002, 0x2725: 0x0002, 0x2726: 0x0002, 0x2727: 0x0002, 0x2728: 0x0002, 0x2729: 0x0002, + 0x272a: 0x0002, 0x272b: 0x0002, 0x272c: 0x0002, 0x272d: 0x0002, 0x272e: 0x0002, 0x272f: 0x0002, + 0x2730: 0x0002, 0x2731: 0x0002, 0x2732: 0x0002, 0x2733: 0x0002, 0x2734: 0x0002, 0x2735: 0x0002, + 0x2736: 0x0002, 0x2737: 0x0002, 0x2738: 0x0002, 0x2739: 0x0002, 0x273a: 0x0002, 0x273b: 0x0002, + // Block 0x9d, offset 0x2740 + 0x2776: 0x000c, 0x2777: 0x000c, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 
0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x000a, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x000c, 0x2802: 0x000c, 0x2803: 0x000c, 0x2804: 0x0001, 0x2805: 0x000c, + 0x2806: 0x000c, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x000c, 0x280d: 0x000c, 0x280e: 0x000c, 0x280f: 0x000c, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x000c, 0x2839: 0x000c, 0x283a: 0x000c, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x000c, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 
0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x0001, 0x287a: 0x0001, 0x287b: 0x0001, + 0x287c: 0x0001, 0x287d: 0x0001, 0x287e: 0x0001, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x0001, 0x28b1: 0x0001, 0x28b2: 0x0001, 0x28b3: 0x0001, 0x28b4: 0x0001, 0x28b5: 0x0001, + 0x28b6: 0x0001, 0x28b7: 0x0001, 0x28b8: 0x0001, 0x28b9: 0x000a, 0x28ba: 0x000a, 0x28bb: 0x000a, + 0x28bc: 0x000a, 0x28bd: 0x000a, 0x28be: 0x000a, 0x28bf: 0x000a, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000d, 0x28c7: 0x000d, 0x28c8: 0x000d, 0x28c9: 0x000d, 0x28ca: 0x000d, 0x28cb: 0x000d, + 0x28cc: 0x000d, 0x28cd: 0x000d, 0x28ce: 0x000d, 0x28cf: 0x000d, 0x28d0: 0x000d, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000c, 0x28e5: 0x000c, 0x28e6: 0x000c, 0x28e7: 0x000c, 0x28e8: 0x000d, 0x28e9: 0x000d, + 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d, + 0x28f0: 0x0005, 0x28f1: 0x0005, 0x28f2: 0x0005, 0x28f3: 0x0005, 0x28f4: 0x0005, 0x28f5: 0x0005, + 0x28f6: 0x0005, 0x28f7: 0x0005, 0x28f8: 0x0005, 0x28f9: 0x0005, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x0001, 0x2901: 0x0001, 0x2902: 0x0001, 0x2903: 0x0001, 0x2904: 0x0001, 0x2905: 0x0001, + 0x2906: 0x0001, 0x2907: 0x0001, 0x2908: 0x0001, 0x2909: 0x0001, 0x290a: 0x0001, 0x290b: 0x0001, + 0x290c: 0x0001, 0x290d: 0x0001, 0x290e: 0x0001, 0x290f: 0x0001, 0x2910: 0x0001, 0x2911: 0x0001, + 0x2912: 0x0001, 0x2913: 0x0001, 0x2914: 0x0001, 0x2915: 0x0001, 0x2916: 0x0001, 0x2917: 0x0001, + 0x2918: 0x0001, 0x2919: 0x0001, 0x291a: 0x0001, 0x291b: 0x0001, 0x291c: 0x0001, 0x291d: 0x0001, + 0x291e: 0x0001, 0x291f: 0x0001, 0x2920: 0x0005, 0x2921: 0x0005, 0x2922: 0x0005, 0x2923: 0x0005, + 0x2924: 0x0005, 0x2925: 0x0005, 0x2926: 0x0005, 0x2927: 0x0005, 0x2928: 0x0005, 0x2929: 0x0005, + 0x292a: 0x0005, 0x292b: 0x0005, 0x292c: 0x0005, 0x292d: 0x0005, 0x292e: 0x0005, 0x292f: 0x0005, + 0x2930: 0x0005, 0x2931: 0x0005, 
0x2932: 0x0005, 0x2933: 0x0005, 0x2934: 0x0005, 0x2935: 0x0005, + 0x2936: 0x0005, 0x2937: 0x0005, 0x2938: 0x0005, 0x2939: 0x0005, 0x293a: 0x0005, 0x293b: 0x0005, + 0x293c: 0x0005, 0x293d: 0x0005, 0x293e: 0x0005, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2940: 0x0001, 0x2941: 0x0001, 0x2942: 0x0001, 0x2943: 0x0001, 0x2944: 0x0001, 0x2945: 0x0001, + 0x2946: 0x0001, 0x2947: 0x0001, 0x2948: 0x0001, 0x2949: 0x0001, 0x294a: 0x0001, 0x294b: 0x0001, + 0x294c: 0x0001, 0x294d: 0x0001, 0x294e: 0x0001, 0x294f: 0x0001, 0x2950: 0x0001, 0x2951: 0x0001, + 0x2952: 0x0001, 0x2953: 0x0001, 0x2954: 0x0001, 0x2955: 0x0001, 0x2956: 0x0001, 0x2957: 0x0001, + 0x2958: 0x0001, 0x2959: 0x0001, 0x295a: 0x0001, 0x295b: 0x0001, 0x295c: 0x0001, 0x295d: 0x0001, + 0x295e: 0x0001, 0x295f: 0x0001, 0x2960: 0x0001, 0x2961: 0x0001, 0x2962: 0x0001, 0x2963: 0x0001, + 0x2964: 0x0001, 0x2965: 0x0001, 0x2966: 0x0001, 0x2967: 0x0001, 0x2968: 0x0001, 0x2969: 0x0001, + 0x296a: 0x0001, 0x296b: 0x000c, 0x296c: 0x000c, 0x296d: 0x0001, 0x296e: 0x0001, 0x296f: 0x0001, + 0x2970: 0x0001, 0x2971: 0x0001, 0x2972: 0x0001, 0x2973: 0x0001, 0x2974: 0x0001, 0x2975: 0x0001, + 0x2976: 0x0001, 0x2977: 0x0001, 0x2978: 0x0001, 0x2979: 0x0001, 0x297a: 0x0001, 0x297b: 0x0001, + 0x297c: 0x0001, 0x297d: 0x0001, 0x297e: 0x0001, 0x297f: 0x0001, + // Block 0xa6, offset 0x2980 + 0x2980: 0x0001, 0x2981: 0x0001, 0x2982: 0x0001, 0x2983: 0x0001, 0x2984: 0x0001, 0x2985: 0x0001, + 0x2986: 0x0001, 0x2987: 0x0001, 0x2988: 0x0001, 0x2989: 0x0001, 0x298a: 0x0001, 0x298b: 0x0001, + 0x298c: 0x0001, 0x298d: 0x0001, 0x298e: 0x0001, 0x298f: 0x0001, 0x2990: 0x0001, 0x2991: 0x0001, + 0x2992: 0x0001, 0x2993: 0x0001, 0x2994: 0x0001, 0x2995: 0x0001, 0x2996: 0x0001, 0x2997: 0x0001, + 0x2998: 0x0001, 0x2999: 0x0001, 0x299a: 0x0001, 0x299b: 0x0001, 0x299c: 0x0001, 0x299d: 0x0001, + 0x299e: 0x0001, 0x299f: 0x0001, 0x29a0: 0x0001, 0x29a1: 0x0001, 0x29a2: 0x0001, 0x29a3: 0x0001, + 0x29a4: 0x0001, 0x29a5: 0x0001, 0x29a6: 0x0001, 0x29a7: 0x0001, 0x29a8: 0x0001, 0x29a9: 0x0001, + 0x29aa: 0x0001, 0x29ab: 0x0001, 0x29ac: 0x0001, 0x29ad: 0x0001, 0x29ae: 0x0001, 0x29af: 0x0001, + 0x29b0: 0x000d, 0x29b1: 0x000d, 0x29b2: 0x000d, 0x29b3: 0x000d, 0x29b4: 0x000d, 0x29b5: 0x000d, + 0x29b6: 0x000d, 0x29b7: 0x000d, 0x29b8: 0x000d, 0x29b9: 0x000d, 0x29ba: 0x000d, 0x29bb: 0x000d, + 0x29bc: 0x000d, 0x29bd: 0x000d, 0x29be: 0x000d, 0x29bf: 0x000d, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000d, 0x29c1: 0x000d, 0x29c2: 0x000d, 0x29c3: 0x000d, 0x29c4: 0x000d, 0x29c5: 0x000d, + 0x29c6: 0x000c, 0x29c7: 0x000c, 0x29c8: 0x000c, 0x29c9: 0x000c, 0x29ca: 0x000c, 0x29cb: 0x000c, + 0x29cc: 0x000c, 0x29cd: 0x000c, 0x29ce: 0x000c, 0x29cf: 0x000c, 0x29d0: 0x000c, 0x29d1: 0x000d, + 0x29d2: 0x000d, 0x29d3: 0x000d, 0x29d4: 0x000d, 0x29d5: 0x000d, 0x29d6: 0x000d, 0x29d7: 0x000d, + 0x29d8: 0x000d, 0x29d9: 0x000d, 0x29da: 0x000d, 0x29db: 0x000d, 0x29dc: 0x000d, 0x29dd: 0x000d, + 0x29de: 0x000d, 0x29df: 0x000d, 0x29e0: 0x000d, 0x29e1: 0x000d, 0x29e2: 0x000d, 0x29e3: 0x000d, + 0x29e4: 0x000d, 0x29e5: 0x000d, 0x29e6: 0x000d, 0x29e7: 0x000d, 0x29e8: 0x000d, 0x29e9: 0x000d, + 0x29ea: 0x000d, 0x29eb: 0x000d, 0x29ec: 0x000d, 0x29ed: 0x000d, 0x29ee: 0x000d, 0x29ef: 0x000d, + 0x29f0: 0x0001, 0x29f1: 0x0001, 0x29f2: 0x0001, 0x29f3: 0x0001, 0x29f4: 0x0001, 0x29f5: 0x0001, + 0x29f6: 0x0001, 0x29f7: 0x0001, 0x29f8: 0x0001, 0x29f9: 0x0001, 0x29fa: 0x0001, 0x29fb: 0x0001, + 0x29fc: 0x0001, 0x29fd: 0x0001, 0x29fe: 0x0001, 0x29ff: 0x0001, + // Block 0xa8, offset 0x2a00 + 0x2a01: 0x000c, + 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 
0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, 0x2a3f: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x000c, 0x2a41: 0x000c, 0x2a42: 0x000c, 0x2a43: 0x000c, 0x2a44: 0x000c, 0x2a45: 0x000c, + 0x2a46: 0x000c, + 0x2a52: 0x000a, 0x2a53: 0x000a, 0x2a54: 0x000a, 0x2a55: 0x000a, 0x2a56: 0x000a, 0x2a57: 0x000a, + 0x2a58: 0x000a, 0x2a59: 0x000a, 0x2a5a: 0x000a, 0x2a5b: 0x000a, 0x2a5c: 0x000a, 0x2a5d: 0x000a, + 0x2a5e: 0x000a, 0x2a5f: 0x000a, 0x2a60: 0x000a, 0x2a61: 0x000a, 0x2a62: 0x000a, 0x2a63: 0x000a, + 0x2a64: 0x000a, 0x2a65: 0x000a, + 0x2a7f: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab3: 0x000c, 0x2ab4: 0x000c, 0x2ab5: 0x000c, + 0x2ab6: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, 0x2ac2: 0x000c, + 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, 0x2aeb: 0x000c, 0x2aed: 0x000c, 0x2aee: 0x000c, 0x2aef: 0x000c, + 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af2: 0x000c, 0x2af3: 0x000c, 0x2af4: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b33: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, 0x2b41: 0x000c, + 0x2b76: 0x000c, 0x2b77: 0x000c, 0x2b78: 0x000c, 0x2b79: 0x000c, 0x2b7a: 0x000c, 0x2b7b: 0x000c, + 0x2b7c: 0x000c, 0x2b7d: 0x000c, 0x2b7e: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b89: 0x000c, 0x2b8a: 0x000c, 0x2b8b: 0x000c, + 0x2b8c: 0x000c, 0x2b8f: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bef: 0x000c, + 0x2bf0: 0x000c, 0x2bf1: 0x000c, 0x2bf4: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, + 0x2bfe: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c1f: 0x000c, 0x2c23: 0x000c, + 0x2c24: 0x000c, 0x2c25: 0x000c, 0x2c26: 0x000c, 0x2c27: 0x000c, 0x2c28: 0x000c, 0x2c29: 0x000c, + 0x2c2a: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, + 0x2c66: 0x000c, 0x2c67: 0x000c, 0x2c68: 0x000c, 0x2c69: 0x000c, + 0x2c6a: 0x000c, 0x2c6b: 0x000c, 0x2c6c: 0x000c, + 0x2c70: 0x000c, 0x2c71: 0x000c, 0x2c72: 0x000c, 0x2c73: 0x000c, 0x2c74: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb8: 0x000c, 0x2cb9: 0x000c, 0x2cba: 0x000c, 0x2cbb: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbe: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc2: 0x000c, 0x2cc3: 0x000c, 0x2cc4: 0x000c, + 0x2cc6: 0x000c, + 0x2cde: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d3a: 0x000c, + 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, 0x2d42: 0x000c, 0x2d43: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2dbc: 0x000c, 0x2dbd: 0x000c, 0x2dbf: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x000c, + 0x2ddc: 0x000c, 0x2ddd: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, + 0x2e3d: 0x000c, 0x2e3f: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x000c, + 0x2e60: 0x000a, 0x2e61: 0x000a, 0x2e62: 0x000a, 0x2e63: 0x000a, + 0x2e64: 0x000a, 0x2e65: 0x000a, 0x2e66: 0x000a, 0x2e67: 0x000a, 0x2e68: 0x000a, 0x2e69: 0x000a, + 0x2e6a: 0x000a, 0x2e6b: 0x000a, 0x2e6c: 0x000a, + // Block 0xba, offset 0x2e80 + 0x2eab: 0x000c, 0x2ead: 0x000c, + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb7: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2edd: 0x000c, + 0x2ede: 0x000c, 0x2edf: 0x000c, 0x2ee2: 0x000c, 0x2ee3: 0x000c, + 0x2ee4: 0x000c, 0x2ee5: 0x000c, 0x2ee7: 
0x000c, 0x2ee8: 0x000c, 0x2ee9: 0x000c, + 0x2eea: 0x000c, 0x2eeb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f2f: 0x000c, + 0x2f30: 0x000c, 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f36: 0x000c, 0x2f37: 0x000c, 0x2f39: 0x000c, 0x2f3a: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7e: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f83: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fd4: 0x000c, 0x2fd5: 0x000c, 0x2fd6: 0x000c, 0x2fd7: 0x000c, + 0x2fda: 0x000c, 0x2fdb: 0x000c, + 0x2fe0: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3006: 0x000c, 0x3009: 0x000c, 0x300a: 0x000c, + 0x3033: 0x000c, 0x3034: 0x000c, 0x3035: 0x000c, + 0x3036: 0x000c, 0x3037: 0x000c, 0x3038: 0x000c, 0x303b: 0x000c, + 0x303c: 0x000c, 0x303d: 0x000c, 0x303e: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3047: 0x000c, + 0x3051: 0x000c, + 0x3052: 0x000c, 0x3053: 0x000c, 0x3054: 0x000c, 0x3055: 0x000c, 0x3056: 0x000c, + 0x3059: 0x000c, 0x305a: 0x000c, 0x305b: 0x000c, + // Block 0xc2, offset 0x3080 + 0x308a: 0x000c, 0x308b: 0x000c, + 0x308c: 0x000c, 0x308d: 0x000c, 0x308e: 0x000c, 0x308f: 0x000c, 0x3090: 0x000c, 0x3091: 0x000c, + 0x3092: 0x000c, 0x3093: 0x000c, 0x3094: 0x000c, 0x3095: 0x000c, 0x3096: 0x000c, + 0x3098: 0x000c, 0x3099: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, 0x30f5: 0x000c, + 0x30f6: 0x000c, 0x30f8: 0x000c, 0x30f9: 0x000c, 0x30fa: 0x000c, 0x30fb: 0x000c, + 0x30fc: 0x000c, 0x30fd: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3112: 0x000c, 0x3113: 0x000c, 0x3114: 0x000c, 0x3115: 0x000c, 0x3116: 0x000c, 0x3117: 0x000c, + 0x3118: 0x000c, 0x3119: 0x000c, 0x311a: 0x000c, 0x311b: 0x000c, 0x311c: 0x000c, 0x311d: 0x000c, + 0x311e: 0x000c, 0x311f: 0x000c, 0x3120: 0x000c, 0x3121: 0x000c, 0x3122: 0x000c, 0x3123: 0x000c, + 0x3124: 0x000c, 0x3125: 0x000c, 0x3126: 0x000c, 0x3127: 0x000c, + 0x312a: 0x000c, 0x312b: 0x000c, 0x312c: 0x000c, 0x312d: 0x000c, 0x312e: 0x000c, 0x312f: 0x000c, + 0x3130: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, 0x317a: 0x000c, + 0x317c: 0x000c, 0x317d: 0x000c, 0x317f: 0x000c, + // Block 0xc6, offset 0x3180 + 0x3180: 0x000c, 0x3181: 0x000c, 0x3182: 0x000c, 0x3183: 0x000c, 0x3184: 0x000c, 0x3185: 0x000c, + 0x3187: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d5: 0x000c, 0x31d7: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3233: 0x000c, 0x3234: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3255: 0x000a, 0x3256: 0x000a, 0x3257: 0x000a, + 0x3258: 0x000a, 0x3259: 0x000a, 0x325a: 0x000a, 0x325b: 0x000a, 0x325c: 0x000a, 0x325d: 0x0004, + 0x325e: 0x0004, 0x325f: 0x0004, 0x3260: 0x0004, 0x3261: 0x000a, 0x3262: 0x000a, 0x3263: 0x000a, + 0x3264: 0x000a, 0x3265: 0x000a, 0x3266: 0x000a, 0x3267: 0x000a, 0x3268: 0x000a, 0x3269: 0x000a, + 0x326a: 0x000a, 0x326b: 0x000a, 0x326c: 0x000a, 0x326d: 0x000a, 0x326e: 0x000a, 0x326f: 0x000a, + 0x3270: 0x000a, 0x3271: 0x000a, + // Block 0xca, offset 0x3280 + 0x32b0: 0x000c, 0x32b1: 0x000c, 0x32b2: 0x000c, 0x32b3: 0x000c, 0x32b4: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32f0: 0x000c, 0x32f1: 0x000c, 0x32f2: 0x000c, 0x32f3: 0x000c, 0x32f4: 0x000c, 0x32f5: 0x000c, + 0x32f6: 0x000c, + // Block 0xcc, offset 0x3300 + 0x330f: 0x000c, + // Block 0xcd, offset 0x3340 + 
0x334f: 0x000c, 0x3350: 0x000c, 0x3351: 0x000c, + 0x3352: 0x000c, + // Block 0xce, offset 0x3380 + 0x33a2: 0x000a, + 0x33a4: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33dd: 0x000c, + 0x33de: 0x000c, 0x33e0: 0x000b, 0x33e1: 0x000b, 0x33e2: 0x000b, 0x33e3: 0x000b, + // Block 0xd0, offset 0x3400 + 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x3433: 0x000b, 0x3434: 0x000b, 0x3435: 0x000b, + 0x3436: 0x000b, 0x3437: 0x000b, 0x3438: 0x000b, 0x3439: 0x000b, 0x343a: 0x000b, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, 0x346d: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3480: 0x000a, 0x3481: 0x000a, 0x3482: 0x000c, 0x3483: 0x000c, 0x3484: 0x000c, 0x3485: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, 0x34d3: 0x000a, 0x34d4: 0x000a, 0x34d5: 0x000a, 0x34d6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x351b: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3555: 0x000a, + // Block 0xd6, offset 0x3580 + 0x358f: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c9: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3603: 0x000a, + 0x360e: 0x0002, 0x360f: 0x0002, 0x3610: 0x0002, 0x3611: 0x0002, + 0x3612: 0x0002, 0x3613: 0x0002, 0x3614: 0x0002, 0x3615: 0x0002, 0x3616: 0x0002, 0x3617: 0x0002, + 0x3618: 0x0002, 0x3619: 0x0002, 0x361a: 0x0002, 0x361b: 0x0002, 0x361c: 0x0002, 0x361d: 0x0002, + 0x361e: 0x0002, 0x361f: 0x0002, 0x3620: 0x0002, 0x3621: 0x0002, 0x3622: 0x0002, 0x3623: 0x0002, + 0x3624: 0x0002, 0x3625: 0x0002, 0x3626: 0x0002, 0x3627: 0x0002, 0x3628: 0x0002, 0x3629: 0x0002, + 0x362a: 0x0002, 0x362b: 0x0002, 0x362c: 0x0002, 0x362d: 0x0002, 0x362e: 0x0002, 0x362f: 0x0002, + 0x3630: 0x0002, 0x3631: 0x0002, 0x3632: 0x0002, 0x3633: 0x0002, 0x3634: 0x0002, 0x3635: 0x0002, + 0x3636: 0x0002, 0x3637: 0x0002, 0x3638: 0x0002, 0x3639: 0x0002, 0x363a: 0x0002, 0x363b: 0x0002, + 0x363c: 0x0002, 0x363d: 0x0002, 0x363e: 0x0002, 0x363f: 0x0002, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000c, 0x3641: 0x000c, 0x3642: 0x000c, 0x3643: 0x000c, 0x3644: 0x000c, 0x3645: 0x000c, + 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 0x000c, 0x364a: 0x000c, 0x364b: 0x000c, + 0x364c: 0x000c, 0x364d: 0x000c, 0x364e: 0x000c, 0x364f: 0x000c, 0x3650: 0x000c, 0x3651: 0x000c, + 0x3652: 0x000c, 0x3653: 0x000c, 0x3654: 0x000c, 0x3655: 0x000c, 0x3656: 0x000c, 0x3657: 0x000c, + 0x3658: 0x000c, 0x3659: 0x000c, 0x365a: 0x000c, 0x365b: 0x000c, 0x365c: 0x000c, 0x365d: 0x000c, + 0x365e: 0x000c, 0x365f: 0x000c, 0x3660: 0x000c, 0x3661: 0x000c, 0x3662: 0x000c, 0x3663: 0x000c, + 0x3664: 0x000c, 0x3665: 0x000c, 0x3666: 0x000c, 0x3667: 0x000c, 0x3668: 0x000c, 0x3669: 0x000c, + 0x366a: 0x000c, 0x366b: 0x000c, 0x366c: 0x000c, 0x366d: 0x000c, 0x366e: 0x000c, 0x366f: 0x000c, + 0x3670: 0x000c, 0x3671: 0x000c, 0x3672: 0x000c, 0x3673: 0x000c, 0x3674: 0x000c, 0x3675: 0x000c, + 0x3676: 0x000c, 0x367b: 0x000c, + 0x367c: 0x000c, 0x367d: 0x000c, 0x367e: 0x000c, 0x367f: 0x000c, + // Block 0xda, offset 0x3680 + 0x3680: 0x000c, 0x3681: 0x000c, 0x3682: 0x000c, 0x3683: 0x000c, 0x3684: 0x000c, 0x3685: 
0x000c, + 0x3686: 0x000c, 0x3687: 0x000c, 0x3688: 0x000c, 0x3689: 0x000c, 0x368a: 0x000c, 0x368b: 0x000c, + 0x368c: 0x000c, 0x368d: 0x000c, 0x368e: 0x000c, 0x368f: 0x000c, 0x3690: 0x000c, 0x3691: 0x000c, + 0x3692: 0x000c, 0x3693: 0x000c, 0x3694: 0x000c, 0x3695: 0x000c, 0x3696: 0x000c, 0x3697: 0x000c, + 0x3698: 0x000c, 0x3699: 0x000c, 0x369a: 0x000c, 0x369b: 0x000c, 0x369c: 0x000c, 0x369d: 0x000c, + 0x369e: 0x000c, 0x369f: 0x000c, 0x36a0: 0x000c, 0x36a1: 0x000c, 0x36a2: 0x000c, 0x36a3: 0x000c, + 0x36a4: 0x000c, 0x36a5: 0x000c, 0x36a6: 0x000c, 0x36a7: 0x000c, 0x36a8: 0x000c, 0x36a9: 0x000c, + 0x36aa: 0x000c, 0x36ab: 0x000c, 0x36ac: 0x000c, + 0x36b5: 0x000c, + // Block 0xdb, offset 0x36c0 + 0x36c4: 0x000c, + 0x36db: 0x000c, 0x36dc: 0x000c, 0x36dd: 0x000c, + 0x36de: 0x000c, 0x36df: 0x000c, 0x36e1: 0x000c, 0x36e2: 0x000c, 0x36e3: 0x000c, + 0x36e4: 0x000c, 0x36e5: 0x000c, 0x36e6: 0x000c, 0x36e7: 0x000c, 0x36e8: 0x000c, 0x36e9: 0x000c, + 0x36ea: 0x000c, 0x36eb: 0x000c, 0x36ec: 0x000c, 0x36ed: 0x000c, 0x36ee: 0x000c, 0x36ef: 0x000c, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000c, 0x3701: 0x000c, 0x3702: 0x000c, 0x3703: 0x000c, 0x3704: 0x000c, 0x3705: 0x000c, + 0x3706: 0x000c, 0x3708: 0x000c, 0x3709: 0x000c, 0x370a: 0x000c, 0x370b: 0x000c, + 0x370c: 0x000c, 0x370d: 0x000c, 0x370e: 0x000c, 0x370f: 0x000c, 0x3710: 0x000c, 0x3711: 0x000c, + 0x3712: 0x000c, 0x3713: 0x000c, 0x3714: 0x000c, 0x3715: 0x000c, 0x3716: 0x000c, 0x3717: 0x000c, + 0x3718: 0x000c, 0x371b: 0x000c, 0x371c: 0x000c, 0x371d: 0x000c, + 0x371e: 0x000c, 0x371f: 0x000c, 0x3720: 0x000c, 0x3721: 0x000c, 0x3723: 0x000c, + 0x3724: 0x000c, 0x3726: 0x000c, 0x3727: 0x000c, 0x3728: 0x000c, 0x3729: 0x000c, + 0x372a: 0x000c, + // Block 0xdd, offset 0x3740 + 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x377f: 0x0004, + // Block 0xde, offset 0x3780 + 0x3780: 0x0001, 0x3781: 0x0001, 0x3782: 0x0001, 0x3783: 0x0001, 0x3784: 0x0001, 0x3785: 0x0001, + 0x3786: 0x0001, 0x3787: 0x0001, 0x3788: 0x0001, 0x3789: 0x0001, 0x378a: 0x0001, 0x378b: 0x0001, + 0x378c: 0x0001, 0x378d: 0x0001, 0x378e: 0x0001, 0x378f: 0x0001, 0x3790: 0x000c, 0x3791: 0x000c, + 0x3792: 0x000c, 0x3793: 0x000c, 0x3794: 0x000c, 0x3795: 0x000c, 0x3796: 0x000c, 0x3797: 0x0001, + 0x3798: 0x0001, 0x3799: 0x0001, 0x379a: 0x0001, 0x379b: 0x0001, 0x379c: 0x0001, 0x379d: 0x0001, + 0x379e: 0x0001, 0x379f: 0x0001, 0x37a0: 0x0001, 0x37a1: 0x0001, 0x37a2: 0x0001, 0x37a3: 0x0001, + 0x37a4: 0x0001, 0x37a5: 0x0001, 0x37a6: 0x0001, 0x37a7: 0x0001, 0x37a8: 0x0001, 0x37a9: 0x0001, + 0x37aa: 0x0001, 0x37ab: 0x0001, 0x37ac: 0x0001, 0x37ad: 0x0001, 0x37ae: 0x0001, 0x37af: 0x0001, + 0x37b0: 0x0001, 0x37b1: 0x0001, 0x37b2: 0x0001, 0x37b3: 0x0001, 0x37b4: 0x0001, 0x37b5: 0x0001, + 0x37b6: 0x0001, 0x37b7: 0x0001, 0x37b8: 0x0001, 0x37b9: 0x0001, 0x37ba: 0x0001, 0x37bb: 0x0001, + 0x37bc: 0x0001, 0x37bd: 0x0001, 0x37be: 0x0001, 0x37bf: 0x0001, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0001, 0x37c1: 0x0001, 0x37c2: 0x0001, 0x37c3: 0x0001, 0x37c4: 0x000c, 0x37c5: 0x000c, + 0x37c6: 0x000c, 0x37c7: 0x000c, 0x37c8: 0x000c, 0x37c9: 0x000c, 0x37ca: 0x000c, 0x37cb: 0x0001, + 0x37cc: 0x0001, 0x37cd: 0x0001, 0x37ce: 0x0001, 0x37cf: 0x0001, 0x37d0: 0x0001, 0x37d1: 0x0001, + 0x37d2: 0x0001, 0x37d3: 0x0001, 0x37d4: 0x0001, 0x37d5: 0x0001, 0x37d6: 0x0001, 0x37d7: 0x0001, + 0x37d8: 0x0001, 0x37d9: 0x0001, 0x37da: 0x0001, 0x37db: 0x0001, 0x37dc: 0x0001, 0x37dd: 0x0001, + 0x37de: 0x0001, 0x37df: 0x0001, 0x37e0: 0x0001, 0x37e1: 0x0001, 0x37e2: 0x0001, 0x37e3: 0x0001, + 0x37e4: 0x0001, 0x37e5: 0x0001, 0x37e6: 
0x0001, 0x37e7: 0x0001, 0x37e8: 0x0001, 0x37e9: 0x0001, + 0x37ea: 0x0001, 0x37eb: 0x0001, 0x37ec: 0x0001, 0x37ed: 0x0001, 0x37ee: 0x0001, 0x37ef: 0x0001, + 0x37f0: 0x0001, 0x37f1: 0x0001, 0x37f2: 0x0001, 0x37f3: 0x0001, 0x37f4: 0x0001, 0x37f5: 0x0001, + 0x37f6: 0x0001, 0x37f7: 0x0001, 0x37f8: 0x0001, 0x37f9: 0x0001, 0x37fa: 0x0001, 0x37fb: 0x0001, + 0x37fc: 0x0001, 0x37fd: 0x0001, 0x37fe: 0x0001, 0x37ff: 0x0001, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000d, 0x3801: 0x000d, 0x3802: 0x000d, 0x3803: 0x000d, 0x3804: 0x000d, 0x3805: 0x000d, + 0x3806: 0x000d, 0x3807: 0x000d, 0x3808: 0x000d, 0x3809: 0x000d, 0x380a: 0x000d, 0x380b: 0x000d, + 0x380c: 0x000d, 0x380d: 0x000d, 0x380e: 0x000d, 0x380f: 0x000d, 0x3810: 0x0001, 0x3811: 0x0001, + 0x3812: 0x0001, 0x3813: 0x0001, 0x3814: 0x0001, 0x3815: 0x0001, 0x3816: 0x0001, 0x3817: 0x0001, + 0x3818: 0x0001, 0x3819: 0x0001, 0x381a: 0x0001, 0x381b: 0x0001, 0x381c: 0x0001, 0x381d: 0x0001, + 0x381e: 0x0001, 0x381f: 0x0001, 0x3820: 0x0001, 0x3821: 0x0001, 0x3822: 0x0001, 0x3823: 0x0001, + 0x3824: 0x0001, 0x3825: 0x0001, 0x3826: 0x0001, 0x3827: 0x0001, 0x3828: 0x0001, 0x3829: 0x0001, + 0x382a: 0x0001, 0x382b: 0x0001, 0x382c: 0x0001, 0x382d: 0x0001, 0x382e: 0x0001, 0x382f: 0x0001, + 0x3830: 0x0001, 0x3831: 0x0001, 0x3832: 0x0001, 0x3833: 0x0001, 0x3834: 0x0001, 0x3835: 0x0001, + 0x3836: 0x0001, 0x3837: 0x0001, 0x3838: 0x0001, 0x3839: 0x0001, 0x383a: 0x0001, 0x383b: 0x0001, + 0x383c: 0x0001, 0x383d: 0x0001, 0x383e: 0x0001, 0x383f: 0x0001, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000d, 0x3841: 0x000d, 0x3842: 0x000d, 0x3843: 0x000d, 0x3844: 0x000d, 0x3845: 0x000d, + 0x3846: 0x000d, 0x3847: 0x000d, 0x3848: 0x000d, 0x3849: 0x000d, 0x384a: 0x000d, 0x384b: 0x000d, + 0x384c: 0x000d, 0x384d: 0x000d, 0x384e: 0x000d, 0x384f: 0x000d, 0x3850: 0x000d, 0x3851: 0x000d, + 0x3852: 0x000d, 0x3853: 0x000d, 0x3854: 0x000d, 0x3855: 0x000d, 0x3856: 0x000d, 0x3857: 0x000d, + 0x3858: 0x000d, 0x3859: 0x000d, 0x385a: 0x000d, 0x385b: 0x000d, 0x385c: 0x000d, 0x385d: 0x000d, + 0x385e: 0x000d, 0x385f: 0x000d, 0x3860: 0x000d, 0x3861: 0x000d, 0x3862: 0x000d, 0x3863: 0x000d, + 0x3864: 0x000d, 0x3865: 0x000d, 0x3866: 0x000d, 0x3867: 0x000d, 0x3868: 0x000d, 0x3869: 0x000d, + 0x386a: 0x000d, 0x386b: 0x000d, 0x386c: 0x000d, 0x386d: 0x000d, 0x386e: 0x000d, 0x386f: 0x000d, + 0x3870: 0x000a, 0x3871: 0x000a, 0x3872: 0x000d, 0x3873: 0x000d, 0x3874: 0x000d, 0x3875: 0x000d, + 0x3876: 0x000d, 0x3877: 0x000d, 0x3878: 0x000d, 0x3879: 0x000d, 0x387a: 0x000d, 0x387b: 0x000d, + 0x387c: 0x000d, 0x387d: 0x000d, 0x387e: 0x000d, 0x387f: 0x000d, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 
0x38be: 0x000a, 0x38bf: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, + 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, + 0x38f1: 0x000a, 0x38f2: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38f7: 0x000a, 0x38f8: 0x000a, 0x38f9: 0x000a, 0x38fa: 0x000a, 0x38fb: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a, + 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a, + 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x0002, 0x3941: 0x0002, 0x3942: 0x0002, 0x3943: 0x0002, 0x3944: 0x0002, 0x3945: 0x0002, + 0x3946: 0x0002, 0x3947: 0x0002, 0x3948: 0x0002, 0x3949: 0x0002, 0x394a: 0x0002, 0x394b: 0x000a, + 0x394c: 0x000a, 0x394d: 0x000a, 0x394e: 0x000a, 0x394f: 0x000a, + 0x396f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, 0x39ae: 0x000a, 0x39af: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39ed: 0x000a, + // Block 0xe8, offset 0x3a00 + 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a, + 0x3a24: 0x000a, 0x3a25: 0x000a, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a, + 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 0x000a, + 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a, + 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a, + 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a, 0x3a63: 0x000a, + 0x3a64: 0x000a, 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a, + 0x3a6a: 0x000a, 0x3a6b: 0x000a, 0x3a6c: 0x000a, + 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a, + 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a, + 0x3a7c: 0x000a, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a, + 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a, 0x3a8b: 0x000a, + 0x3a8c: 0x000a, 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 
0x000a, + 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a, + 0x3a98: 0x000a, + 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a, + 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a, + 0x3aaa: 0x000a, 0x3aab: 0x000a, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a, + 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a, + 0x3ad0: 0x000a, 0x3ad1: 0x000a, + 0x3ad2: 0x000a, 0x3ad3: 0x000a, 0x3ad4: 0x000a, 0x3ad5: 0x000a, 0x3ad6: 0x000a, 0x3ad7: 0x000a, + 0x3ad8: 0x000a, 0x3ad9: 0x000a, 0x3ada: 0x000a, 0x3adb: 0x000a, 0x3adc: 0x000a, 0x3add: 0x000a, + 0x3ade: 0x000a, 0x3adf: 0x000a, 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a, + 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a, + 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a, 0x3aee: 0x000a, 0x3aef: 0x000a, + 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a, 0x3af4: 0x000a, 0x3af5: 0x000a, + 0x3af6: 0x000a, 0x3af7: 0x000a, 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a, 0x3afb: 0x000a, + 0x3afc: 0x000a, 0x3afd: 0x000a, 0x3afe: 0x000a, 0x3aff: 0x000a, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a, 0x3b03: 0x000a, 0x3b04: 0x000a, 0x3b05: 0x000a, + 0x3b06: 0x000a, 0x3b07: 0x000a, + 0x3b10: 0x000a, 0x3b11: 0x000a, + 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a, 0x3b16: 0x000a, 0x3b17: 0x000a, + 0x3b18: 0x000a, 0x3b19: 0x000a, + 0x3b20: 0x000a, 0x3b21: 0x000a, 0x3b22: 0x000a, 0x3b23: 0x000a, + 0x3b24: 0x000a, 0x3b25: 0x000a, 0x3b26: 0x000a, 0x3b27: 0x000a, 0x3b28: 0x000a, 0x3b29: 0x000a, + 0x3b2a: 0x000a, 0x3b2b: 0x000a, 0x3b2c: 0x000a, 0x3b2d: 0x000a, 0x3b2e: 0x000a, 0x3b2f: 0x000a, + 0x3b30: 0x000a, 0x3b31: 0x000a, 0x3b32: 0x000a, 0x3b33: 0x000a, 0x3b34: 0x000a, 0x3b35: 0x000a, + 0x3b36: 0x000a, 0x3b37: 0x000a, 0x3b38: 0x000a, 0x3b39: 0x000a, 0x3b3a: 0x000a, 0x3b3b: 0x000a, + 0x3b3c: 0x000a, 0x3b3d: 0x000a, 0x3b3e: 0x000a, 0x3b3f: 0x000a, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x000a, 0x3b41: 0x000a, 0x3b42: 0x000a, 0x3b43: 0x000a, 0x3b44: 0x000a, 0x3b45: 0x000a, + 0x3b46: 0x000a, 0x3b47: 0x000a, + 0x3b50: 0x000a, 0x3b51: 0x000a, + 0x3b52: 0x000a, 0x3b53: 0x000a, 0x3b54: 0x000a, 0x3b55: 0x000a, 0x3b56: 0x000a, 0x3b57: 0x000a, + 0x3b58: 0x000a, 0x3b59: 0x000a, 0x3b5a: 0x000a, 0x3b5b: 0x000a, 0x3b5c: 0x000a, 0x3b5d: 0x000a, + 0x3b5e: 0x000a, 0x3b5f: 0x000a, 0x3b60: 0x000a, 0x3b61: 0x000a, 0x3b62: 0x000a, 0x3b63: 0x000a, + 0x3b64: 0x000a, 0x3b65: 0x000a, 0x3b66: 0x000a, 0x3b67: 0x000a, 0x3b68: 0x000a, 0x3b69: 0x000a, + 0x3b6a: 0x000a, 0x3b6b: 0x000a, 0x3b6c: 0x000a, 0x3b6d: 0x000a, + 0x3b70: 0x000a, 0x3b71: 0x000a, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000a, 0x3b81: 0x000a, 0x3b82: 0x000a, 0x3b83: 0x000a, 0x3b84: 0x000a, 0x3b85: 0x000a, + 0x3b86: 0x000a, 0x3b87: 0x000a, 0x3b88: 0x000a, 0x3b89: 0x000a, 0x3b8a: 0x000a, 0x3b8b: 0x000a, + 0x3b8c: 0x000a, 0x3b8d: 0x000a, 0x3b8e: 0x000a, 0x3b8f: 0x000a, 0x3b90: 0x000a, 0x3b91: 0x000a, + 0x3b92: 0x000a, 0x3b93: 0x000a, 0x3b94: 0x000a, 0x3b95: 0x000a, 0x3b96: 0x000a, 0x3b97: 0x000a, + 0x3b98: 0x000a, 0x3b99: 0x000a, 0x3b9a: 0x000a, 0x3b9b: 0x000a, 0x3b9c: 0x000a, 0x3b9d: 0x000a, + 0x3b9e: 0x000a, 0x3b9f: 0x000a, 0x3ba0: 0x000a, 0x3ba1: 0x000a, 0x3ba2: 0x000a, 0x3ba3: 0x000a, + 0x3ba4: 0x000a, 0x3ba5: 0x000a, 0x3ba6: 0x000a, 0x3ba7: 
0x000a, 0x3ba8: 0x000a, 0x3ba9: 0x000a, + 0x3baa: 0x000a, 0x3bab: 0x000a, 0x3bac: 0x000a, 0x3bad: 0x000a, 0x3bae: 0x000a, 0x3baf: 0x000a, + 0x3bb0: 0x000a, 0x3bb1: 0x000a, 0x3bb2: 0x000a, 0x3bb3: 0x000a, 0x3bb4: 0x000a, 0x3bb5: 0x000a, + 0x3bb6: 0x000a, 0x3bb7: 0x000a, 0x3bb8: 0x000a, 0x3bba: 0x000a, 0x3bbb: 0x000a, + 0x3bbc: 0x000a, 0x3bbd: 0x000a, 0x3bbe: 0x000a, 0x3bbf: 0x000a, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000a, 0x3bc1: 0x000a, 0x3bc2: 0x000a, 0x3bc3: 0x000a, 0x3bc4: 0x000a, 0x3bc5: 0x000a, + 0x3bc6: 0x000a, 0x3bc7: 0x000a, 0x3bc8: 0x000a, 0x3bc9: 0x000a, 0x3bca: 0x000a, 0x3bcb: 0x000a, + 0x3bcd: 0x000a, 0x3bce: 0x000a, 0x3bcf: 0x000a, 0x3bd0: 0x000a, 0x3bd1: 0x000a, + 0x3bd2: 0x000a, 0x3bd3: 0x000a, 0x3bd4: 0x000a, 0x3bd5: 0x000a, 0x3bd6: 0x000a, 0x3bd7: 0x000a, + 0x3bd8: 0x000a, 0x3bd9: 0x000a, 0x3bda: 0x000a, 0x3bdb: 0x000a, 0x3bdc: 0x000a, 0x3bdd: 0x000a, + 0x3bde: 0x000a, 0x3bdf: 0x000a, 0x3be0: 0x000a, 0x3be1: 0x000a, 0x3be2: 0x000a, 0x3be3: 0x000a, + 0x3be4: 0x000a, 0x3be5: 0x000a, 0x3be6: 0x000a, 0x3be7: 0x000a, 0x3be8: 0x000a, 0x3be9: 0x000a, + 0x3bea: 0x000a, 0x3beb: 0x000a, 0x3bec: 0x000a, 0x3bed: 0x000a, 0x3bee: 0x000a, 0x3bef: 0x000a, + 0x3bf0: 0x000a, 0x3bf1: 0x000a, 0x3bf2: 0x000a, 0x3bf3: 0x000a, 0x3bf4: 0x000a, 0x3bf5: 0x000a, + 0x3bf6: 0x000a, 0x3bf7: 0x000a, 0x3bf8: 0x000a, 0x3bf9: 0x000a, 0x3bfa: 0x000a, 0x3bfb: 0x000a, + 0x3bfc: 0x000a, 0x3bfd: 0x000a, 0x3bfe: 0x000a, 0x3bff: 0x000a, + // Block 0xf0, offset 0x3c00 + 0x3c00: 0x000a, 0x3c01: 0x000a, 0x3c02: 0x000a, 0x3c03: 0x000a, 0x3c04: 0x000a, 0x3c05: 0x000a, + 0x3c06: 0x000a, 0x3c07: 0x000a, 0x3c08: 0x000a, 0x3c09: 0x000a, 0x3c0a: 0x000a, 0x3c0b: 0x000a, + 0x3c0c: 0x000a, 0x3c0d: 0x000a, 0x3c0e: 0x000a, 0x3c0f: 0x000a, 0x3c10: 0x000a, 0x3c11: 0x000a, + 0x3c12: 0x000a, 0x3c13: 0x000a, + 0x3c20: 0x000a, 0x3c21: 0x000a, 0x3c22: 0x000a, 0x3c23: 0x000a, + 0x3c24: 0x000a, 0x3c25: 0x000a, 0x3c26: 0x000a, 0x3c27: 0x000a, 0x3c28: 0x000a, 0x3c29: 0x000a, + 0x3c2a: 0x000a, 0x3c2b: 0x000a, 0x3c2c: 0x000a, 0x3c2d: 0x000a, + 0x3c30: 0x000a, 0x3c31: 0x000a, 0x3c32: 0x000a, 0x3c33: 0x000a, 0x3c34: 0x000a, + 0x3c38: 0x000a, 0x3c39: 0x000a, 0x3c3a: 0x000a, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x000a, 0x3c41: 0x000a, 0x3c42: 0x000a, 0x3c43: 0x000a, 0x3c44: 0x000a, 0x3c45: 0x000a, + 0x3c46: 0x000a, + 0x3c50: 0x000a, 0x3c51: 0x000a, + 0x3c52: 0x000a, 0x3c53: 0x000a, 0x3c54: 0x000a, 0x3c55: 0x000a, 0x3c56: 0x000a, 0x3c57: 0x000a, + 0x3c58: 0x000a, 0x3c59: 0x000a, 0x3c5a: 0x000a, 0x3c5b: 0x000a, 0x3c5c: 0x000a, 0x3c5d: 0x000a, + 0x3c5e: 0x000a, 0x3c5f: 0x000a, 0x3c60: 0x000a, 0x3c61: 0x000a, 0x3c62: 0x000a, 0x3c63: 0x000a, + 0x3c64: 0x000a, 0x3c65: 0x000a, 0x3c66: 0x000a, 0x3c67: 0x000a, 0x3c68: 0x000a, + 0x3c70: 0x000a, 0x3c71: 0x000a, 0x3c72: 0x000a, 0x3c73: 0x000a, 0x3c74: 0x000a, 0x3c75: 0x000a, + 0x3c76: 0x000a, + // Block 0xf2, offset 0x3c80 + 0x3c80: 0x000a, 0x3c81: 0x000a, 0x3c82: 0x000a, + 0x3c90: 0x000a, 0x3c91: 0x000a, + 0x3c92: 0x000a, 0x3c93: 0x000a, 0x3c94: 0x000a, 0x3c95: 0x000a, 0x3c96: 0x000a, + // Block 0xf3, offset 0x3cc0 + 0x3cc0: 0x000a, 0x3cc1: 0x000a, 0x3cc2: 0x000a, 0x3cc3: 0x000a, 0x3cc4: 0x000a, 0x3cc5: 0x000a, + 0x3cc6: 0x000a, 0x3cc7: 0x000a, 0x3cc8: 0x000a, 0x3cc9: 0x000a, 0x3cca: 0x000a, 0x3ccb: 0x000a, + 0x3ccc: 0x000a, 0x3ccd: 0x000a, 0x3cce: 0x000a, 0x3ccf: 0x000a, 0x3cd0: 0x000a, 0x3cd1: 0x000a, + 0x3cd2: 0x000a, 0x3cd4: 0x000a, 0x3cd5: 0x000a, 0x3cd6: 0x000a, 0x3cd7: 0x000a, + 0x3cd8: 0x000a, 0x3cd9: 0x000a, 0x3cda: 0x000a, 0x3cdb: 0x000a, 0x3cdc: 0x000a, 0x3cdd: 0x000a, + 0x3cde: 
0x000a, 0x3cdf: 0x000a, 0x3ce0: 0x000a, 0x3ce1: 0x000a, 0x3ce2: 0x000a, 0x3ce3: 0x000a, + 0x3ce4: 0x000a, 0x3ce5: 0x000a, 0x3ce6: 0x000a, 0x3ce7: 0x000a, 0x3ce8: 0x000a, 0x3ce9: 0x000a, + 0x3cea: 0x000a, 0x3ceb: 0x000a, 0x3cec: 0x000a, 0x3ced: 0x000a, 0x3cee: 0x000a, 0x3cef: 0x000a, + 0x3cf0: 0x000a, 0x3cf1: 0x000a, 0x3cf2: 0x000a, 0x3cf3: 0x000a, 0x3cf4: 0x000a, 0x3cf5: 0x000a, + 0x3cf6: 0x000a, 0x3cf7: 0x000a, 0x3cf8: 0x000a, 0x3cf9: 0x000a, 0x3cfa: 0x000a, 0x3cfb: 0x000a, + 0x3cfc: 0x000a, 0x3cfd: 0x000a, 0x3cfe: 0x000a, 0x3cff: 0x000a, + // Block 0xf4, offset 0x3d00 + 0x3d00: 0x000a, 0x3d01: 0x000a, 0x3d02: 0x000a, 0x3d03: 0x000a, 0x3d04: 0x000a, 0x3d05: 0x000a, + 0x3d06: 0x000a, 0x3d07: 0x000a, 0x3d08: 0x000a, 0x3d09: 0x000a, 0x3d0a: 0x000a, + 0x3d30: 0x0002, 0x3d31: 0x0002, 0x3d32: 0x0002, 0x3d33: 0x0002, 0x3d34: 0x0002, 0x3d35: 0x0002, + 0x3d36: 0x0002, 0x3d37: 0x0002, 0x3d38: 0x0002, 0x3d39: 0x0002, + // Block 0xf5, offset 0x3d40 + 0x3d7e: 0x000b, 0x3d7f: 0x000b, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x000b, 0x3d81: 0x000b, 0x3d82: 0x000b, 0x3d83: 0x000b, 0x3d84: 0x000b, 0x3d85: 0x000b, + 0x3d86: 0x000b, 0x3d87: 0x000b, 0x3d88: 0x000b, 0x3d89: 0x000b, 0x3d8a: 0x000b, 0x3d8b: 0x000b, + 0x3d8c: 0x000b, 0x3d8d: 0x000b, 0x3d8e: 0x000b, 0x3d8f: 0x000b, 0x3d90: 0x000b, 0x3d91: 0x000b, + 0x3d92: 0x000b, 0x3d93: 0x000b, 0x3d94: 0x000b, 0x3d95: 0x000b, 0x3d96: 0x000b, 0x3d97: 0x000b, + 0x3d98: 0x000b, 0x3d99: 0x000b, 0x3d9a: 0x000b, 0x3d9b: 0x000b, 0x3d9c: 0x000b, 0x3d9d: 0x000b, + 0x3d9e: 0x000b, 0x3d9f: 0x000b, 0x3da0: 0x000b, 0x3da1: 0x000b, 0x3da2: 0x000b, 0x3da3: 0x000b, + 0x3da4: 0x000b, 0x3da5: 0x000b, 0x3da6: 0x000b, 0x3da7: 0x000b, 0x3da8: 0x000b, 0x3da9: 0x000b, + 0x3daa: 0x000b, 0x3dab: 0x000b, 0x3dac: 0x000b, 0x3dad: 0x000b, 0x3dae: 0x000b, 0x3daf: 0x000b, + 0x3db0: 0x000b, 0x3db1: 0x000b, 0x3db2: 0x000b, 0x3db3: 0x000b, 0x3db4: 0x000b, 0x3db5: 0x000b, + 0x3db6: 0x000b, 0x3db7: 0x000b, 0x3db8: 0x000b, 0x3db9: 0x000b, 0x3dba: 0x000b, 0x3dbb: 0x000b, + 0x3dbc: 0x000b, 0x3dbd: 0x000b, 0x3dbe: 0x000b, 0x3dbf: 0x000b, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x000c, 0x3dc1: 0x000c, 0x3dc2: 0x000c, 0x3dc3: 0x000c, 0x3dc4: 0x000c, 0x3dc5: 0x000c, + 0x3dc6: 0x000c, 0x3dc7: 0x000c, 0x3dc8: 0x000c, 0x3dc9: 0x000c, 0x3dca: 0x000c, 0x3dcb: 0x000c, + 0x3dcc: 0x000c, 0x3dcd: 0x000c, 0x3dce: 0x000c, 0x3dcf: 0x000c, 0x3dd0: 0x000c, 0x3dd1: 0x000c, + 0x3dd2: 0x000c, 0x3dd3: 0x000c, 0x3dd4: 0x000c, 0x3dd5: 0x000c, 0x3dd6: 0x000c, 0x3dd7: 0x000c, + 0x3dd8: 0x000c, 0x3dd9: 0x000c, 0x3dda: 0x000c, 0x3ddb: 0x000c, 0x3ddc: 0x000c, 0x3ddd: 0x000c, + 0x3dde: 0x000c, 0x3ddf: 0x000c, 0x3de0: 0x000c, 0x3de1: 0x000c, 0x3de2: 0x000c, 0x3de3: 0x000c, + 0x3de4: 0x000c, 0x3de5: 0x000c, 0x3de6: 0x000c, 0x3de7: 0x000c, 0x3de8: 0x000c, 0x3de9: 0x000c, + 0x3dea: 0x000c, 0x3deb: 0x000c, 0x3dec: 0x000c, 0x3ded: 0x000c, 0x3dee: 0x000c, 0x3def: 0x000c, + 0x3df0: 0x000b, 0x3df1: 0x000b, 0x3df2: 0x000b, 0x3df3: 0x000b, 0x3df4: 0x000b, 0x3df5: 0x000b, + 0x3df6: 0x000b, 0x3df7: 0x000b, 0x3df8: 0x000b, 0x3df9: 0x000b, 0x3dfa: 0x000b, 0x3dfb: 0x000b, + 0x3dfc: 0x000b, 0x3dfd: 0x000b, 0x3dfe: 0x000b, 0x3dff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x136: 0x28, 0x137: 0x29, + 0x138: 0x2a, 0x139: 0x2b, 0x13a: 0x2c, 0x13b: 0x2d, 0x13c: 0x2e, 0x13d: 0x2f, 0x13e: 0x30, 0x13f: 0x31, + // Block 0x5, offset 0x140 + 0x140: 0x32, 0x141: 0x33, 0x142: 0x34, + 0x14d: 0x35, 0x14e: 0x36, + 0x150: 0x37, + 0x15a: 0x38, 0x15c: 0x39, 0x15d: 0x3a, 0x15e: 0x3b, 0x15f: 0x3c, + 0x160: 0x3d, 0x162: 0x3e, 0x164: 0x3f, 0x165: 0x40, 0x167: 0x41, + 0x168: 0x42, 0x169: 0x43, 0x16a: 0x44, 0x16b: 0x45, 0x16c: 0x46, 0x16d: 0x47, 0x16e: 0x48, 0x16f: 0x49, + 0x170: 0x4a, 0x173: 0x4b, 0x177: 0x4c, + 0x17e: 0x4d, 0x17f: 0x4e, + // Block 0x6, offset 0x180 + 0x180: 0x4f, 0x181: 0x50, 0x182: 0x51, 0x183: 0x52, 0x184: 0x53, 0x185: 0x54, 0x186: 0x55, 0x187: 0x56, + 0x188: 0x57, 0x189: 0x56, 0x18a: 0x56, 0x18b: 0x56, 0x18c: 0x58, 0x18d: 0x59, 0x18e: 0x5a, 0x18f: 0x56, + 0x190: 0x5b, 0x191: 0x5c, 0x192: 0x5d, 0x193: 0x5e, 0x194: 0x56, 0x195: 0x56, 0x196: 0x56, 0x197: 0x56, + 0x198: 0x56, 0x199: 0x56, 0x19a: 0x5f, 0x19b: 0x56, 0x19c: 0x56, 0x19d: 0x60, 0x19e: 0x56, 0x19f: 0x61, + 0x1a4: 0x56, 0x1a5: 0x56, 0x1a6: 0x62, 0x1a7: 0x63, + 0x1a8: 0x56, 0x1a9: 0x56, 0x1aa: 0x56, 0x1ab: 0x56, 0x1ac: 0x56, 0x1ad: 0x64, 0x1ae: 0x65, 0x1af: 0x56, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x56, 0x1bd: 0x56, 0x1be: 0x56, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x56, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26d: 0x8b, 0x26f: 0x8c, + // Block 0xa, offset 0x280 + 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8f, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x90, + 0x2b8: 0x91, 0x2b9: 0x92, 0x2ba: 0x0e, 0x2bb: 0x93, 0x2bc: 0x94, 0x2bd: 0x95, 0x2bf: 0x96, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x97, 0x2c5: 0x56, 0x2c6: 0x98, 0x2c7: 0x99, + 0x2cb: 0x9a, 0x2cd: 0x9b, + 0x2e0: 0x9c, 0x2e1: 0x9c, 0x2e2: 0x9c, 0x2e3: 0x9c, 0x2e4: 0x9d, 0x2e5: 0x9c, 0x2e6: 0x9c, 0x2e7: 0x9c, + 0x2e8: 0x9e, 0x2e9: 0x9c, 0x2ea: 0x9c, 0x2eb: 0x9f, 0x2ec: 0xa0, 0x2ed: 0x9c, 0x2ee: 0x9c, 0x2ef: 0x9c, + 0x2f0: 0x9c, 0x2f1: 0x9c, 0x2f2: 0x9c, 0x2f3: 0x9c, 0x2f4: 0xa1, 0x2f5: 0x9c, 0x2f6: 0x9c, 0x2f7: 0x9c, + 0x2f8: 0x9c, 0x2f9: 0xa2, 0x2fa: 0xa3, 0x2fb: 0x9c, 0x2fc: 0xa4, 0x2fd: 0xa5, 0x2fe: 0x9c, 0x2ff: 0x9c, + // Block 0xc, offset 0x300 + 0x300: 0xa6, 0x301: 0xa7, 
0x302: 0xa8, 0x304: 0xa9, 0x305: 0xaa, 0x306: 0xab, 0x307: 0xac, + 0x308: 0xad, 0x30b: 0xae, 0x30c: 0x26, 0x30d: 0xaf, + 0x310: 0xb0, 0x311: 0xb1, 0x312: 0xb2, 0x313: 0xb3, 0x316: 0xb4, 0x317: 0xb5, + 0x318: 0xb6, 0x319: 0xb7, 0x31a: 0xb8, 0x31c: 0xb9, + 0x320: 0xba, 0x324: 0xbb, 0x325: 0xbc, 0x327: 0xbd, + 0x328: 0xbe, 0x329: 0xbf, 0x32a: 0xc0, + 0x330: 0xc1, 0x332: 0xc2, 0x334: 0xc3, 0x335: 0xc4, 0x336: 0xc5, + 0x33b: 0xc6, 0x33f: 0xc7, + // Block 0xd, offset 0x340 + 0x36b: 0xc8, 0x36c: 0xc9, + 0x37d: 0xca, 0x37e: 0xcb, 0x37f: 0xcc, + // Block 0xe, offset 0x380 + 0x3b2: 0xcd, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xce, 0x3c6: 0xcf, + 0x3c8: 0x56, 0x3c9: 0xd0, 0x3cc: 0x56, 0x3cd: 0xd1, + 0x3db: 0xd2, 0x3dc: 0xd3, 0x3dd: 0xd4, 0x3de: 0xd5, 0x3df: 0xd6, + 0x3e8: 0xd7, 0x3e9: 0xd8, 0x3ea: 0xd9, + // Block 0x10, offset 0x400 + 0x400: 0xda, 0x404: 0xc9, + 0x40b: 0xdb, + 0x420: 0x9c, 0x421: 0x9c, 0x422: 0x9c, 0x423: 0xdc, 0x424: 0x9c, 0x425: 0xdd, 0x426: 0x9c, 0x427: 0x9c, + 0x428: 0x9c, 0x429: 0x9c, 0x42a: 0x9c, 0x42b: 0x9c, 0x42c: 0x9c, 0x42d: 0x9c, 0x42e: 0x9c, 0x42f: 0x9c, + 0x430: 0x9c, 0x431: 0xa4, 0x432: 0x0e, 0x433: 0x9c, 0x434: 0x0e, 0x435: 0xde, 0x436: 0x9c, 0x437: 0x9c, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xdf, 0x43c: 0x9c, 0x43d: 0x9c, 0x43e: 0x9c, 0x43f: 0x9c, + // Block 0x11, offset 0x440 + 0x440: 0xe0, 0x441: 0x56, 0x442: 0xe1, 0x443: 0xe2, 0x444: 0xe3, 0x445: 0xe4, 0x446: 0xe5, + 0x449: 0xe6, 0x44c: 0x56, 0x44d: 0x56, 0x44e: 0x56, 0x44f: 0x56, + 0x450: 0x56, 0x451: 0x56, 0x452: 0x56, 0x453: 0x56, 0x454: 0x56, 0x455: 0x56, 0x456: 0x56, 0x457: 0x56, + 0x458: 0x56, 0x459: 0x56, 0x45a: 0x56, 0x45b: 0xe7, 0x45c: 0x56, 0x45d: 0x6c, 0x45e: 0x56, 0x45f: 0xe8, + 0x460: 0xe9, 0x461: 0xea, 0x462: 0xeb, 0x464: 0x56, 0x465: 0xec, 0x466: 0x56, 0x467: 0xed, + 0x468: 0x56, 0x469: 0xee, 0x46a: 0xef, 0x46b: 0xf0, 0x46c: 0x56, 0x46d: 0x56, 0x46e: 0xf1, 0x46f: 0xf2, + 0x47f: 0xf3, + // Block 0x12, offset 0x480 + 0x4bf: 0xf3, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xf4, 0x541: 0xf4, 0x542: 0xf4, 0x543: 0xf4, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xf5, + 0x548: 0xf4, 0x549: 0xf4, 0x54a: 0xf4, 0x54b: 0xf4, 0x54c: 0xf4, 0x54d: 0xf4, 0x54e: 0xf4, 0x54f: 0xf4, + 0x550: 0xf4, 0x551: 0xf4, 0x552: 0xf4, 0x553: 0xf4, 0x554: 0xf4, 0x555: 0xf4, 0x556: 0xf4, 0x557: 0xf4, + 0x558: 0xf4, 0x559: 0xf4, 0x55a: 0xf4, 0x55b: 0xf4, 0x55c: 0xf4, 0x55d: 0xf4, 0x55e: 0xf4, 0x55f: 0xf4, + 0x560: 0xf4, 0x561: 0xf4, 0x562: 0xf4, 0x563: 0xf4, 0x564: 0xf4, 0x565: 0xf4, 0x566: 0xf4, 0x567: 0xf4, + 0x568: 0xf4, 0x569: 0xf4, 0x56a: 0xf4, 0x56b: 0xf4, 0x56c: 0xf4, 0x56d: 0xf4, 0x56e: 0xf4, 0x56f: 0xf4, + 0x570: 0xf4, 0x571: 0xf4, 0x572: 0xf4, 0x573: 0xf4, 0x574: 0xf4, 0x575: 0xf4, 0x576: 0xf4, 0x577: 0xf4, + 0x578: 0xf4, 0x579: 0xf4, 0x57a: 0xf4, 0x57b: 0xf4, 0x57c: 0xf4, 0x57d: 0xf4, 0x57e: 0xf4, 0x57f: 0xf4, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 17464 bytes (17KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 10f5202c6..7e1ae096e 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ 
b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go new file mode 100644 index 000000000..9ea1b4214 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -0,0 +1,7760 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.16 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "13.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x1870 + firstCCC = 0x2CAB + endMulti = 0x2F77 + firstLeadingCCC = 0x49C5 + firstCCCZeroExcept = 0x4A8F + firstStarterWithNLead = 0x4AB6 + lastDecomp = 0x4AB8 + maxDecomp = 0x8000 +) + +// decomps: 19128 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 
0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, + 0xCA, 0x92, 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x9D, + 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xB9, 0x42, 0xCE, + 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x94, 0x42, 0xCE, 0x95, 0x42, 0xCE, 0x96, + 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, 0xCE, + 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, 0x42, + 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, 0x9E, + 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, 0xCE, + 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, 0x42, + 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, 0xA7, + 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, 0x42, + 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, 0xB6, + 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, 0xCE, + 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, 0x42, + 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, 0xBE, + 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, 0xCF, + 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, 0x42, + 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, 0x86, + // Bytes 200 - 23f + 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, 0xCF, + 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, 0x42, + 0xD0, 0xBD, 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8C, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + // Bytes 300 - 33f + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + // Bytes 340 - 37f + 0x84, 0x8A, 
0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + // Bytes 380 - 3bf + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + // Bytes 3c0 - 3ff + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + // Bytes 400 - 43f + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE2, + 0x80, 0x82, 0x43, 0xE2, 0x80, 0x83, 0x43, 0xE2, + // Bytes 440 - 47f + 0x80, 0x90, 0x43, 0xE2, 0x80, 0x93, 0x43, 0xE2, + 0x80, 0x94, 0x43, 0xE2, 0x82, 0xA9, 0x43, 0xE2, + 0x86, 0x90, 0x43, 0xE2, 0x86, 0x91, 0x43, 0xE2, + 0x86, 0x92, 0x43, 0xE2, 0x86, 0x93, 0x43, 0xE2, + 0x88, 0x82, 0x43, 0xE2, 0x88, 0x87, 0x43, 0xE2, + 0x88, 0x91, 0x43, 0xE2, 0x88, 0x92, 0x43, 0xE2, + 0x94, 0x82, 0x43, 0xE2, 0x96, 0xA0, 0x43, 0xE2, + 0x97, 0x8B, 0x43, 0xE2, 0xA6, 0x85, 0x43, 0xE2, + // Bytes 480 - 4bf + 0xA6, 0x86, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + // Bytes 4c0 - 4ff + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + // Bytes 500 - 53f + 0x82, 0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + // Bytes 540 - 57f + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, 
+ 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + 0x94, 0x95, 0x43, 0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 0xE3, 0xAB, 0xA4, 0x43, 0xE3, + // Bytes 600 - 63f + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + // Bytes 640 - 67f + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + // Bytes 680 - 6bf + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + // Bytes 6c0 - 6ff + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 0xB9, 0x43, 0xE4, + // Bytes 700 - 73f + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + // Bytes 740 - 77f + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 
0x9A, 0x43, 0xE5, + // Bytes 780 - 7bf + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + // Bytes 7c0 - 7ff + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + // Bytes 800 - 83f + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + // Bytes 840 - 87f + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + 0x8A, 0xB4, 0x43, 0xE5, 0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + // Bytes 900 - 93f + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 0x95, 0x95, 0x43, 0xE5, + // Bytes 940 - 97f + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 
0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + // Bytes a00 - a3f + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + // Bytes a40 - a7f + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + // Bytes a80 - abf + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + 0xB0, 0xB8, 0x43, 0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + // Bytes ac0 - aff + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + // Bytes b00 - b3f + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + // Bytes b80 - bbf + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 
0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 0x85, 0x84, 0x43, 0xE6, + // Bytes bc0 - bff + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + // Bytes c00 - c3f + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + // Bytes c40 - c7f + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + 0x8D, 0x90, 0x43, 0xE6, 0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + // Bytes c80 - cbf + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 0xE6, + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + // Bytes cc0 - cff + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + // Bytes d00 - d3f + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + // Bytes d40 - d7f + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + // Bytes d80 - dbf + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + // Bytes dc0 - dff + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 
0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + // Bytes e00 - e3f + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + // Bytes e40 - e7f + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + // Bytes e80 - ebf + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + // Bytes ec0 - eff + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 0x43, 0xE7, + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + // Bytes f00 - f3f + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + // Bytes f40 - f7f + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + 0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + // Bytes f80 - fbf + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + 0x94, 0x9F, 0x43, 0xE7, 0x94, 0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + // Bytes fc0 - fff + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 
0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + // Bytes 1040 - 107f + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0xA6, 0x8F, 0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 0xAB, 0x8B, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + // Bytes 1100 - 113f + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + // Bytes 1180 - 11bf + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 0x43, 0xE8, + 0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + // Bytes 11c0 - 11ff + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + // Bytes 1200 - 123f + 0x89, 0xB8, 
0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + // Bytes 1240 - 127f + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + // Bytes 1300 - 133f + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + // Bytes 1340 - 137f + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + // Bytes 13c0 - 13ff + 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xBE, 0xB5, 0x43, 0xE8, 0xBE, 0xB6, 0x43, 0xE9, + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 
0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + // Bytes 1440 - 147f + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + // Bytes 1480 - 14bf + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + // Bytes 14c0 - 14ff + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + // Bytes 1540 - 157f + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + // Bytes 1600 - 163f + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 
0xBE, 0xA0, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1640 - 167f + 0x9D, 0xAF, 0x43, 0xEA, 0xAC, 0xB7, 0x43, 0xEA, + 0xAD, 0x92, 0x44, 0xF0, 0xA0, 0x84, 0xA2, 0x44, + 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, 0xA0, 0x94, + 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, 0x44, 0xF0, + 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, 0xA0, 0x84, + 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, 0xF0, 0xA0, + 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, 0xA3, 0x44, + 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, 0xA1, 0x9A, + // Bytes 1680 - 16bf + 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, 0x44, 0xF0, + 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, 0xAC, 0x98, + 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, 0xF0, 0xA1, + 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, 0xA6, 0x44, + 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, 0xA2, 0x86, + 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, 0x44, 0xF0, + 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, 0xA1, 0x84, + 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, 0xF0, 0xA2, + // Bytes 16c0 - 16ff + 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, 0xB1, 0x44, + 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, 0xA3, 0x8A, + 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, 0x44, 0xF0, + 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, 0x8E, 0x9C, + 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, 0xF0, 0xA3, + 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, 0xAD, 0x44, + 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, 0xA3, 0xA2, + 0xA7, 0x44, 0xF0, 0xA3, 0xAA, 0x8D, 0x44, 0xF0, + // Bytes 1700 - 173f + 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, 0xB2, 0xBC, + 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, 0xF0, 0xA3, + 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, 0x9E, 0x44, + 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, 0xA4, 0x89, + 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, 0x44, 0xF0, + 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, 0x98, 0x88, + 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, 0xF0, 0xA4, + 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, 0xB6, 0x44, + // Bytes 1740 - 177f + 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, 0xA4, 0xBE, + 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, 0x44, 0xF0, + 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, 0x83, 0xB2, + 0x44, 0xF0, 0xA5, 0x83, 0xB3, 0x44, 0xF0, 0xA5, + 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, 0xB3, 0x44, + 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, 0xA5, 0x90, + 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, 0x44, 0xF0, + 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, 0x9B, 0x85, + // Bytes 1780 - 17bf + 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, 0xF0, 0xA5, + 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, 0xAB, 0x44, + 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, 0xA5, 0xB3, + 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, 0x44, 0xF0, + 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, 0x88, 0xA8, + 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, 0xF0, 0xA6, + 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, 0xBE, 0x44, + 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, 0xA6, 0x94, + // Bytes 17c0 - 17ff + 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, 0x44, 0xF0, + 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, 0x9E, 0xB5, + 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, 0xF0, 0xA6, + 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, 0x95, 0x44, + 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, 0xA6, 0xBC, + 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, 0x44, 0xF0, + 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, 0x8F, 0x8A, + 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, 0xF0, 0xA7, + // Bytes 1800 - 183f + 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, 0xA6, 0x44, + 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, 0xA7, 0xBB, + 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, 0x44, 0xF0, + 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, 0x97, 0xAD, + 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, 0xF0, 0xA8, + 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, 0xB7, 0x44, + 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, 0xA9, 0x87, + 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, 0x44, 0xF0, + // Bytes 1840 - 187f + 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, 0x92, 0x96, + 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, 0xF0, 0xA9, 
+ 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, 0x8E, 0x44, + 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, 0xAA, 0x88, + 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, 0x44, 0xF0, + 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, 0x98, 0x80, + 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, 0x42, 0x2E, + 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, 0x2E, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, 0x31, 0x30, + 0x42, 0x31, 0x31, 0x42, 0x31, 0x32, 0x42, 0x31, + 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, 0x35, 0x42, + 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, 0x31, 0x38, + 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, 0x42, 0x32, + 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, 0x31, 0x42, + 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, 0x32, 0x34, + 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, 0x39, 0x42, + 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, 0x33, 0x30, + 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, 0x42, 0x33, + 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, 0x35, 0x42, + 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, 0x33, 0x38, + 0x42, 0x33, 0x39, 0x42, 0x34, 0x2C, 0x42, 0x34, + 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, 0x31, 0x42, + 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, 0x34, 0x34, + // Bytes 1900 - 193f + 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, 0x42, 0x34, + 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, 0x39, 0x42, + 0x35, 0x2C, 0x42, 0x35, 0x2E, 0x42, 0x35, 0x30, + 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, 0x42, 0x37, + 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, 0x2C, 0x42, + 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, 0x39, 0x2E, + 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, 0x42, 0x3F, + 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, 0x71, 0x42, + // Bytes 1940 - 197f + 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, 0x44, 0x5A, + 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, 0x42, 0x47, + 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, 0x56, 0x42, + 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, 0x49, 0x49, + 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, 0x42, 0x49, + 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, 0x42, 0x42, + 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, 0x4C, 0x4A, + 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, 0x52, 0x42, + 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, 0x4E, 0x4A, + 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, 0x42, 0x50, + 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, 0x61, 0x42, + 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, 0x53, 0x4D, + 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, 0x42, 0x54, + 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, 0x43, 0x42, + 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, 0x58, 0x49, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, 0x42, 0x63, + 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, 0x61, 0x42, + 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, 0x64, 0x7A, + 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, 0x42, 0x66, + 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, 0x6D, 0x42, + 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, 0x69, 0x6A, + 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, 0x42, 0x69, + 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, 0x56, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, 0x6B, 0x6C, + 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, 0x42, 0x6C, + 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, 0x6E, 0x42, + 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, 0x6D, 0x33, + 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, 0x42, 0x6D, + 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, 0x67, 0x42, + 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, 0x6D, 0x73, + 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, 0x6A, 0x42, + 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, 0x6F, 0x56, + 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, 0x42, 0x70, + 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, 0x63, 0x42, + 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, 0x73, 0x74, + 0x42, 0x76, 0x69, 
0x42, 0x78, 0x69, 0x43, 0x28, + 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, 0x43, 0x28, + 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, 0x43, 0x28, + // Bytes 1a80 - 1abf + 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, 0x43, 0x28, + 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, 0x43, 0x28, + 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, 0x43, 0x28, + 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, 0x43, 0x28, + 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, 0x43, 0x28, + 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, 0x43, 0x28, + 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, 0x43, 0x28, + 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, 0x43, 0x28, + // Bytes 1ac0 - 1aff + 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, 0x43, 0x28, + 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, 0x43, 0x28, + 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, 0x43, 0x28, + 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, 0x43, 0x28, + 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, 0x43, 0x28, + 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, 0x43, 0x28, + 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, 0x43, 0x28, + 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, 0x43, 0x28, + // Bytes 1b00 - 1b3f + 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, 0x43, 0x28, + 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, 0x43, 0x28, + 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, 0x43, 0x28, + 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, 0x43, 0x28, + 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, 0x43, 0x28, + 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, 0x43, 0x28, + 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, 0x43, 0x28, + 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, 0x43, 0x28, + // Bytes 1b40 - 1b7f + 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, 0x43, 0x28, + 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, 0x43, 0x28, + 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, 0x43, 0x28, + 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, 0x43, 0x28, + 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, 0x43, 0x31, + 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, 0x43, 0x31, + 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, 0x43, 0x31, + 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, 0x43, 0x31, + // Bytes 1b80 - 1bbf + 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, 0x43, 0x31, + 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, 0x43, 0x32, + 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, 0x43, 0x3D, + 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, 0x43, 0x46, + 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, 0x43, 0x47, + 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, 0x43, 0x4C, + 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, 0x43, 0x4D, + 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, 0x43, 0x4D, + // Bytes 1bc0 - 1bff + 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, 0x43, 0x50, + 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, 0x43, 0x54, + 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, 0x43, 0x56, + 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, 0x43, 0x61, + 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, 0x43, 0x61, + 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, 0x43, 0x63, + 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, 0x43, 0x63, + 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, 0x43, 0x63, + // Bytes 1c00 - 1c3f + 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, 0x43, 0x64, + 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, 0x43, 0x66, + 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, 0x43, 0x67, + 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, 0x43, 0x69, + 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, 0x43, 0x6B, + 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, 0x43, 0x6B, + 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, 0x43, 0x6C, + 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, 0x43, 0x6D, + // Bytes 1c40 - 1c7f + 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, 0x43, 0x6D, + 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, 0x43, 0x72, + 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, 0x43, 0x78, + 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, 0x43, 0xC2, + 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, 0x43, 0xCE, + 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, 0x43, 0xCE, + 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, 0x43, 0xCE, + 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, 0x43, 0xCE, + // Bytes 1c80 - 1cbf + 0xBC, 0x6D, 
0x43, 0xCE, 0xBC, 0x73, 0x44, 0x28, + 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, 0x31, 0x29, + 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, 0x28, 0x31, + 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, 0x29, 0x44, + 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, 0x31, 0x36, + 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, 0x44, 0x28, + 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, 0x39, 0x29, + 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, 0x30, 0xE7, + // Bytes 1cc0 - 1cff + 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, 0x84, 0x44, + 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, 0xE6, 0x9C, + 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, 0x44, 0x32, + 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, 0x9C, 0x88, + 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, 0x33, 0xE6, + 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, 0x88, 0x44, + 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, 0xE6, 0x97, + 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, 0x44, 0x34, + // Bytes 1d00 - 1d3f + 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, 0x97, 0xA5, + 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, 0x35, 0xE7, + 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, 0xA5, 0x44, + 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, 0xE7, 0x82, + 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, 0x44, 0x37, + 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, 0x82, 0xB9, + 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, 0x38, 0xE6, + 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, 0xB9, 0x44, + // Bytes 1d40 - 1d7f + 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, 0xE6, 0x9C, + 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, 0x44, 0x56, + 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, 0x6D, 0x2E, + 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, 0x70, 0x2E, + 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, 0x69, 0x44, + 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, 0xB4, 0xD5, + 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, 0x44, 0xD5, + 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, 0xD5, 0xB6, + // Bytes 1d80 - 1dbf + 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, 0xD7, 0x90, + 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, 0xB4, 0x44, + 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, 0xA8, 0xD8, + 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, 0x44, 0xD8, + 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, 0xD8, 0xB2, + 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, 0xD8, 0xA8, + 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, 0x87, 0x44, + 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, 0xA8, 0xD9, + // Bytes 1dc0 - 1dff + 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, 0x44, 0xD8, + 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, 0xD8, 0xAE, + 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, 0xD8, 0xAA, + 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, 0x85, 0x44, + 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, 0xAA, 0xD9, + 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, 0x44, 0xD8, + 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, 0xD8, 0xAC, + 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, 0xD8, 0xAB, + // Bytes 1e00 - 1e3f + 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, 0x85, 0x44, + 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, 0xAB, 0xD9, + 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, 0x44, 0xD8, + 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, 0xD8, 0xAD, + 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, 0xD8, 0xAC, + 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, 0x8A, 0x44, + 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, 0xAD, 0xD9, + 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, 0x44, 0xD8, + // Bytes 1e40 - 1e7f + 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, 0xD8, 0xAC, + 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, 0xD8, 0xAE, + 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, 0x89, 0x44, + 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, 0xB3, 0xD8, + 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, 0x44, 0xD8, + 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, 0xD8, 0xB1, + 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, 0xD8, 0xB3, + 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, 0x89, 0x44, + // Bytes 1e80 - 1ebf + 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, 0xB4, 0xD8, + 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, 0x44, 0xD8, + 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, 0xD8, 0xB1, + 0x44, 0xD8, 0xB4, 0xD9, 0x85, 
0x44, 0xD8, 0xB4, + 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, 0x89, 0x44, + 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, 0xB5, 0xD8, + 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, 0x44, 0xD8, + 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, 0xD9, 0x85, + // Bytes 1ec0 - 1eff + 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, 0xD8, 0xB5, + 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, 0xAC, 0x44, + 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, 0xB6, 0xD8, + 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, 0x44, 0xD8, + 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, 0xD9, 0x89, + 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, 0xD8, 0xB7, + 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, 0x85, 0x44, + 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, 0xB7, 0xD9, + // Bytes 1f00 - 1f3f + 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, 0x44, 0xD8, + 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, 0xD8, 0xB9, + 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, 0xAC, 0x44, + 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, 0xBA, 0xD9, + 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, 0x44, 0xD9, + 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, 0xD8, 0xAD, + 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, 0xD9, 0x81, + // Bytes 1f40 - 1f7f + 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, 0x89, 0x44, + 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, 0x82, 0xD8, + 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, 0x44, 0xD9, + 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, 0xD9, 0x8A, + 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, 0xD9, 0x83, + 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, 0xAD, 0x44, + 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, 0x83, 0xD9, + 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, 0x44, 0xD9, + // Bytes 1f80 - 1fbf + 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, 0xD9, 0x8A, + 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, 0xD9, 0x84, + 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, 0xAD, 0x44, + 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, 0x84, 0xD9, + 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, 0x44, 0xD9, + 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, 0xD9, 0x8A, + 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, 0xD9, 0x85, + 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, 0xAD, 0x44, + // Bytes 1fc0 - 1fff + 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, 0x85, 0xD9, + 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, 0x44, 0xD9, + 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, 0xD8, 0xAC, + 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, 0xD9, 0x86, + 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, 0xB1, 0x44, + 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, 0x86, 0xD9, + 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, 0x44, 0xD9, + 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, 0xD9, 0x89, + // Bytes 2000 - 203f + 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, 0xD9, 0x87, + 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, 0x85, 0x44, + 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, 0x87, 0xD9, + 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, 0x44, 0xD9, + 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, 0xD8, 0xAD, + 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, 0xD9, 0x8A, + 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, 0xB2, 0x44, + 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, 0x8A, 0xD9, + // Bytes 2040 - 207f + 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, 0x44, 0xD9, + 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, 0xD9, 0x8A, + 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, 0xDB, 0x87, + 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, 0x80, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x86, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8C, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x91, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 
0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x89, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, 0xE4, 0xBA, + 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, 0xA3, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, 0x45, 0x28, + 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAD, 0x29, + 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, 0xE5, 0x8D, + // Bytes 2100 - 213f + 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, 0x8D, 0x29, + 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, 0x45, 0x28, + 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, 0xE5, 0x9C, + 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, 0xA6, 0x29, + 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0xA8, 0x29, + 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, 0xE7, 0x81, + 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, 0xB9, 0x29, + 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 0x45, 0x28, + 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, 0xAD, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, 0x45, 0x28, + 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, 0xE8, 0xB2, + 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, 0x87, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, 0x45, 0x30, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, 0x9C, 0x88, + 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x31, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x32, 0xE7, + // Bytes 21c0 - 21ff + 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x34, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x36, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x38, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x38, + // Bytes 2240 - 227f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, 0x45, 0x32, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x30, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x32, + 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x32, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, 0x45, 0x32, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x34, 0xE7, + // Bytes 2280 - 22bf + 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x38, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x32, + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, 0x84, 0x35, 
+ 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x35, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x6D, + 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, 0xE2, 0x81, + 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, 0x88, 0x95, + // Bytes 2300 - 233f + 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, 0x95, 0x73, + 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8, + 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8, + 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + // Bytes 2340 - 237f + 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xAA, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, + // Bytes 2380 - 23bf + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, + 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + // Bytes 23c0 - 23ff + 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, + 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x85, + 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + // Bytes 2400 - 243f + 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB5, + 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, 0xB5, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xDB, + 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x89, 0x46, + 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, + 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + // Bytes 2440 - 247f + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB7, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x89, 0x46, + 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xBA, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xBA, 0xD9, + // Bytes 2480 - 24bf + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, 0xD8, 0xAE, + 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, 0xDB, 0x92, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x83, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x83, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + // Bytes 24c0 - 24ff + 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9, + 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAE, 0xD9, 
0x85, 0x46, 0xD9, 0x84, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, + // Bytes 2500 - 253f + 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAE, + 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD9, 0x85, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x8A, + // Bytes 2540 - 257f + 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x86, + 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x89, 0x46, + // Bytes 2580 - 25bf + 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, + 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD9, 0x87, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xA7, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, 0x46, 0xD9, + // Bytes 25c0 - 25ff + 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x86, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x87, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, 0xD9, 0x8A, + // Bytes 2600 - 263f + 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x90, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x95, 0x46, + 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, 0x46, 0xE0, + 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, 0xE0, 0xBA, + 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, 0xBB, 0x8D, + // Bytes 2640 - 267f + 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, 0x80, 0xE0, + 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBE, 0x92, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x9C, 0xE0, + // Bytes 2680 - 26bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, 0xB7, 0x46, + 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x46, 0xE2, + 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, 0xBB, 0xE3, + 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, 0xE3, 0x82, + // Bytes 26c0 - 26ff + 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0xB3, 0x46, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, 0x46, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, 0x83, 0x9B, + 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, 0xE3, 0x83, + 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xA0, + // Bytes 2700 - 273f + 0x46, 0xE4, 
0xBB, 0xA4, 0xE5, 0x92, 0x8C, 0x46, + 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, + 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, + 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, + 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, + 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, + 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + // Bytes 2740 - 277f + 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, + // Bytes 2780 - 27bf + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, + // Bytes 27c0 - 27ff + 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, + 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, + 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, + 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, + 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, + 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, + 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, + 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, + // Bytes 2800 - 283f + 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, + 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, + 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + // Bytes 2840 - 287f + 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, + 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, + // Bytes 2880 - 28bf + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, + 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, + // Bytes 28c0 - 28ff + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, + // Bytes 2900 - 293f + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0x84, 0x49, 0xE3, 0x83, 0x92, 
0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, + 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + // Bytes 2940 - 297f + 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, + // Bytes 2980 - 29bf + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, + 0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, + // Bytes 29c0 - 29ff + 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + // Bytes 2a40 - 2a7f + 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, + 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, + 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, + 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, + 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + // Bytes 2b00 - 2b3f + 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, + 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, + 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 
0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + // Bytes 2b80 - 2bbf + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, + 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + // Bytes 2bc0 - 2bff + 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, + 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, + 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, + 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, + 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, + // Bytes 2c40 - 2c7f + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, + 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2c80 - 2cbf + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, + 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, + 0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, + 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + // Bytes 2cc0 - 2cff + 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, 0xBF, 0xE0, + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + // Bytes 2d00 - 2d3f + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, + 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, 
+ 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, + // Bytes 2d80 - 2dbf + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, + 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, + 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, + 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, + // Bytes 2dc0 - 2dff + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, + 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, + 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0xA4, 0xB5, 0xF0, 0x91, 0xA4, 0xB0, 0x01, 0x09, + 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, 0xB3, + 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, + // Bytes 2e00 - 2e3f + 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, 0x44, 0x5A, + 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, 0xCC, 0x8C, + 0xCD, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x46, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e40 - 2e7f + 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, 0x46, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e80 - 2ebf + 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, 0x49, + 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, 0x01, + // Bytes 2ec0 - 2eff + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, 0x83, 0xA4, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x82, + 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, 0x85, + 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, 0xE1, + 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, 0xE3, + // Bytes 2f00 - 2f3f + 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, 0x82, 0xB7, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x86, + 0xE0, 0xB3, 0x86, 
0xE0, 0xB3, 0x82, 0x01, 0x86, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, 0x03, + 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, 0xB8, + // Bytes 2f80 - 2fbf + 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, 0x41, + 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, 0x81, 0xCD, + 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, 0x41, 0xCC, + 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, 0xCD, 0x03, + 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, 0xCC, 0x8F, + 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, 0x03, 0x41, + 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, 0xA8, 0xA9, + 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, 0x42, 0xCC, + // Bytes 2fc0 - 2fff + 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, 0xB9, 0x03, + 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, 0xCC, 0x82, + 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, 0x03, 0x43, + 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, 0x87, 0xCD, + 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, 0xA9, 0x03, + 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, 0xCC, 0xB1, + 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, 0x03, 0x45, + // Bytes 3000 - 303f + 0xCC, 0x81, 0xCD, 0x03, 0x45, 0xCC, 0x83, 0xCD, + 0x03, 0x45, 0xCC, 0x86, 0xCD, 0x03, 0x45, 0xCC, + 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, 0xCD, 0x03, + 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, 0xCC, 0x8C, + 0xCD, 0x03, 0x45, 0xCC, 0x8F, 0xCD, 0x03, 0x45, + 0xCC, 0x91, 0xCD, 0x03, 0x45, 0xCC, 0xA8, 0xA9, + 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, 0x45, 0xCC, + 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, 0xCD, 0x03, + // Bytes 3040 - 307f + 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, 0xCC, 0x82, + 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, 0x03, 0x47, + 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, 0x87, 0xCD, + 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, 0x47, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, 0xCD, 0x03, + 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, 0xCC, 0x88, + 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, 0x03, 0x48, + 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, 0xA7, 0xA9, + // Bytes 3080 - 30bf + 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, 0x49, 0xCC, + 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, 0xCD, 0x03, + 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, 0xCC, 0x83, + 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, 0x03, 0x49, + 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, 0x87, 0xCD, + 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, 0x49, 0xCC, + 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, 0xCD, 0x03, + 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, 0xCC, 0xA3, + // Bytes 30c0 - 30ff + 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, 0x03, 0x49, + 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, 0x82, 0xCD, + 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, 0x4B, 0xCC, + 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, 0xB9, 0x03, + 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, 0xCC, 0xB1, + 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, 0x03, 0x4C, + 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, 0xA7, 0xA9, + 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, 0x4C, 0xCC, + // Bytes 3100 - 313f + 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, 0xCD, 0x03, + 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, 0x03, 0x4E, + 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, 0x83, 0xCD, + 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, 0x4E, 0xCC, + 0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, 0xB9, 0x03, + 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, 0xCC, 0xAD, + 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, 0x03, 0x4F, + // Bytes 3140 - 317f + 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, 0x81, 0xCD, + 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, 0x4F, 0xCC, + 0x89, 0xCD, 0x03, 0x4F, 0xCC, 0x8B, 0xCD, 0x03, + 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, 0xCC, 0x8F, + 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, 0x03, 0x50, + 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, 0x87, 0xCD, + 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, 0x52, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, 0xCD, 0x03, + // Bytes 3180 - 31bf + 0x52, 0xCC, 
0x8F, 0xCD, 0x03, 0x52, 0xCC, 0x91, + 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, 0x03, 0x52, + 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, 0x82, 0xCD, + 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, 0x53, 0xCC, + 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, 0xA9, 0x03, + 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, 0xCC, 0x8C, + 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, 0x03, 0x54, + 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, 0xA7, 0xA9, + // Bytes 31c0 - 31ff + 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, 0x54, 0xCC, + 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, 0xCD, 0x03, + 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, 0xCC, 0x82, + 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, 0x03, 0x55, + 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, 0x8A, 0xCD, + 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, 0x55, 0xCC, + 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, 0xCD, 0x03, + 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, 0xCC, 0xA3, + // Bytes 3200 - 323f + 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, 0x03, 0x55, + 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, 0xAD, 0xB9, + 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, 0x56, 0xCC, + 0x83, 0xCD, 0x03, 0x56, 0xCC, 0xA3, 0xB9, 0x03, + 0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, 0xCC, 0x81, + 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, 0x03, 0x57, + 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, 0x88, 0xCD, + 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, 0x58, 0xCC, + // Bytes 3240 - 327f + 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, 0xCD, 0x03, + 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, 0xCC, 0x81, + 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, 0x03, 0x59, + 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, 0x84, 0xCD, + 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, 0x59, 0xCC, + 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, 0xCD, 0x03, + 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, 0x81, + 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, 0x03, 0x5A, + // Bytes 3280 - 32bf + 0xCC, 0x87, 0xCD, 0x03, 0x5A, 0xCC, 0x8C, 0xCD, + 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, + 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, 0xCD, 0x03, + 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, 0xCC, 0x83, + 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, 0x03, 0x61, + 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, 0x8C, 0xCD, + 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, 0x61, 0xCC, + 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, 0xB9, 0x03, + // Bytes 32c0 - 32ff + 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, 0xCC, 0x87, + 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, 0x03, 0x62, + 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, 0x81, 0xCD, + 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, 0x63, 0xCC, + 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, 0xCD, 0x03, + 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, 0x03, 0x64, + 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, 0xAD, 0xB9, + // Bytes 3300 - 333f + 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, 0x65, 0xCC, + 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, 0xCD, 0x03, + 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, 0xCC, 0x86, + 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, 0x03, 0x65, + 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, 0x89, 0xCD, + 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, 0x65, 0xCC, + 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, 0xCD, 0x03, + 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, 0xCC, 0xAD, + // Bytes 3340 - 337f + 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, 0x03, 0x66, + 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x81, 0xCD, + 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, 0x67, 0xCC, + 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, 0xCD, 0x03, + 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x8C, + 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, 0x87, 0xCD, + 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, 0x68, 0xCC, + // Bytes 3380 - 33bf + 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, 0xB9, 0x03, + 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, 0xCC, 0xAE, + 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, 0x03, 0x69, + 0xCC, 0x80, 0xCD, 0x03, 0x69, 
0xCC, 0x81, 0xCD, + 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, 0x69, 0xCC, + 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, 0xCD, 0x03, + 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, 0xCC, 0x89, + 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, 0x03, 0x69, + // Bytes 33c0 - 33ff + 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, 0x91, 0xCD, + 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, 0x69, 0xCC, + 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, 0xB9, 0x03, + 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, 0xCC, 0x8C, + 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, 0x03, 0x6B, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0xA3, 0xB9, + 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, 0x6B, 0xCC, + 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3400 - 343f + 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, 0xCC, 0xA7, + 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, 0x03, 0x6C, + 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, 0x81, 0xCD, + 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, 0x6D, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, 0xCD, 0x03, + 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, 0xCC, 0x83, + 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, 0x03, 0x6E, + 0xCC, 0x8C, 0xCD, 0x03, 0x6E, 0xCC, 0xA3, 0xB9, + // Bytes 3440 - 347f + 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, 0x6E, 0xCC, + 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, 0xB9, 0x03, + 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, 0xCC, 0x81, + 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, 0x03, 0x6F, + 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, 0x8B, 0xCD, + 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, 0x6F, 0xCC, + 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, 0xCD, 0x03, + 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, 0xCC, 0x87, + // Bytes 3480 - 34bf + 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, 0x03, 0x72, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x8C, 0xCD, + 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, 0x72, 0xCC, + 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, 0xA9, 0x03, + 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, 0xCC, 0x82, + 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, 0x03, 0x73, + 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, 0xA7, 0xA9, + 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, 0x74, 0xCC, + // Bytes 34c0 - 34ff + 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, 0xCD, 0x03, + 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, 0xCC, 0xA6, + 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, 0x03, 0x74, + 0xCC, 0xAD, 0xB9, 0x03, 0x74, 0xCC, 0xB1, 0xB9, + 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, 0x75, 0xCC, + 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, 0xCD, 0x03, + 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, 0xCC, 0x89, + 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, 0x03, 0x75, + // Bytes 3500 - 353f + 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, 0x8C, 0xCD, + 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, 0x75, 0xCC, + 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, 0xB9, 0x03, + 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, 0xCC, 0xA8, + 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, 0x03, 0x75, + 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, 0x83, 0xCD, + 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, 0x77, 0xCC, + 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3540 - 357f + 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, 0xCC, 0x87, + 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, 0x03, 0x77, + 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, 0xA3, 0xB9, + 0x03, 0x78, 0xCC, 0x87, 0xCD, 0x03, 0x78, 0xCC, + 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, 0xCD, 0x03, + 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, 0xCC, 0x82, + 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, 0x03, 0x79, + 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, 0x87, 0xCD, + // Bytes 3580 - 35bf + 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, + 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, 0xCD, 0x03, + 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, 0x81, + 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, 0x03, 0x7A, + 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, 0x8C, 0xCD, + 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, + 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, 0x80, 0xCE, + 
0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x04, 0xC2, + // Bytes 35c0 - 35ff + 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, 0x86, 0xCC, + 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, 0x84, 0xCD, + 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0xA6, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, 0x81, 0xCD, + 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, 0x04, 0xC6, + 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, 0x92, 0xCC, + 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x80, 0xCD, + // Bytes 3600 - 363f + 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x91, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + // Bytes 3640 - 367f + 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, 0x04, 0xCE, + 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xCD, 0x04, 0xCE, + // Bytes 3680 - 36bf + 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB1, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xB7, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, 0x80, 0xCD, + // Bytes 36c0 - 36ff + 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, 0x82, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, 0x94, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + // Bytes 3700 - 373f + 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x86, 0xCD, + 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, 0x04, 0xCF, + 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, 0x92, 0xCC, + 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x90, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + // Bytes 3740 - 377f + 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x95, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x9A, 0xCC, + 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, 0x88, 0xCD, + // Bytes 3780 - 37bf + 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, 0xCD, + 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xAD, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + // Bytes 37c0 - 37ff + 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 
+ 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB6, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + // Bytes 3800 - 383f + 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x86, 0xCD, + 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, 0x87, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0xB4, 0xCC, + 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, 0xCD, + // Bytes 3840 - 387f + 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA8, 0xCC, + 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, 0x88, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x04, 0xD8, + 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, + 0x95, 0xB9, 0x04, 0xD9, 0x88, 0xD9, 0x94, 0xCD, + 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x92, 0xD9, + // Bytes 3880 - 38bf + 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, 0x94, 0xCD, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x83, + 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, 0xCE, + // Bytes 38c0 - 38ff + 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x41, + 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, 0xCC, 0xA3, + 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, 0xA7, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + // Bytes 3900 - 393f + 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, + 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, 0xA7, 0xCC, + 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x4F, + // Bytes 3940 - 397f + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x88, + 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, + 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, + 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x4F, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3980 - 39bf + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, + 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x52, + 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x53, 0xCC, + 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0x8C, + // Bytes 39c0 - 39ff + 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0xA3, 0xCC, + 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, 0xCC, 0x81, + 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 
0x84, 0xCE, 0x05, 0x55, 0xCC, + 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + // Bytes 3a00 - 3a3f + 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x81, + // Bytes 3a40 - 3a7f + 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, 0xCE, + 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCE, 0x05, + 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x61, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x61, 0xCC, + 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, 0xA3, 0xCC, + 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + // Bytes 3a80 - 3abf + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x65, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x84, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0xA3, 0xCC, + 0x82, 0xCE, 0x05, 0x65, 0xCC, 0xA7, 0xCC, 0x86, + 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + // Bytes 3ac0 - 3aff + 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, 0xCE, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x6F, + // Bytes 3b00 - 3b3f + 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x6F, + 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x72, 0xCC, + // Bytes 3b40 - 3b7f + 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, 0xCC, 0x81, + 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0x8C, 0xCC, + 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, 0xCC, 0x87, + 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, 0xCE, + 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, 0xCC, 0x88, + // Bytes 3b80 - 3bbf + 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x81, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, 0xCE, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, 0x05, + 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBE, + 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBE, 0xBF, + 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + // Bytes 3bc0 - 3bff + 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, 0x81, + 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, 0xCE, + 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x92, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, 0xB8, + // Bytes 3c00 - 3c3f + 0x05, 0x05, 
0xE2, 0x88, 0x88, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, + 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, 0x05, + // Bytes 3c40 - 3c7f + 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c80 - 3cbf + 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3cc0 - 3cff + 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xAB, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, 0x06, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3d00 - 3d3f + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d40 - 3d7f + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d80 - 3dbf + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3dc0 - 3dff + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3e00 - 3e3f + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 
0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e40 - 3e7f + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e80 - 3ebf + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3ec0 - 3eff + 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + // Bytes 3f00 - 3f3f + 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x89, 0x06, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x15, 0x06, + 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f40 - 3f7f + 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f80 - 3fbf + 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3fc0 - 3fff + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4000 - 403f + 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x11, 0x06, + 
0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4040 - 407f + 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 4080 - 40bf + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 40c0 - 40ff + 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x08, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + // Bytes 4100 - 413f + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4140 - 417f + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + // Bytes 4180 - 41bf + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 41c0 - 41ff + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4200 - 423f + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + // Bytes 4240 - 427f + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 
+ 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, 0xBA, + 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, 0x91, + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0xA5, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, 0xC2, 0xB4, + 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, 0x43, 0x20, + 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, 0x84, 0xCD, + // Bytes 4280 - 42bf + 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, 0x20, 0xCC, + 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, 0xCD, 0x43, + 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, 0xCC, 0x8A, + 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, 0x43, 0x20, + 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, 0x94, 0xCD, + 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, 0x20, 0xCC, + 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, 0xB9, 0x43, + 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, 0xCD, 0x85, + // Bytes 42c0 - 42ff + 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, 0x43, 0x20, + 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, 0x8D, 0x65, + 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, 0x20, 0xD9, + 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, 0x71, 0x43, + 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, 0xD9, 0x92, + 0x79, 0x43, 0x41, 0xCC, 0x8A, 0xCD, 0x43, 0x73, + 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, 0x82, 0x99, + 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x11, 0x44, + // Bytes 4300 - 433f + 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, 0xCE, 0x91, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x9F, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x44, + 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, 0xCC, 0x81, + // Bytes 4340 - 437f + 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xBF, + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, 0xCC, 0x81, + 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x44, + 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, 0xD7, 0x90, + 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x92, + // Bytes 4380 - 43bf + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, 0xD7, 0x95, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, 0xD7, 0x99, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x45, 0x44, + // Bytes 43c0 - 43ff + 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x9C, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA3, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA7, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, 0xD6, 0xBC, + // Bytes 4400 - 443f + 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA9, 0xD7, 0x81, 0x51, 0x44, 0xD7, 0xA9, + 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x35, 0x44, + 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, 0xD8, 0xA7, + 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x44, + 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, 0xD8, 0xB1, + // Bytes 4440 - 447f + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, 0xD9, 0x8B, + 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x69, 0x44, + 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, 0xD9, 0x80, + 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x91, + 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x79, 0x44, + 0xD9, 0x87, 0xD9, 
0xB0, 0x7D, 0x44, 0xD9, 0x88, + 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, 0xD9, 0xB0, + 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x44, + // Bytes 4480 - 44bf + 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, 0xDB, 0x95, + 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, 0x88, 0xCC, + 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x45, + 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x45, 0x20, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x94, + // Bytes 44c0 - 44ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x94, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, 0x45, 0x20, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, + 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, 0xAB, 0x9D, + 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, 0x88, + // Bytes 4500 - 453f + 0xCC, 0x81, 0xCE, 0x46, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, 0xD9, 0x8E, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x8F, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x90, + 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, 0x95, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x96, 0xE0, + // Bytes 4540 - 457f + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x97, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x9C, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA1, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA2, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAB, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAF, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA1, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA2, 0xE0, + // Bytes 4580 - 45bf + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xAF, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x96, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x97, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x9C, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xAB, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB2, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB8, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA1, 0xE0, + // Bytes 45c0 - 45ff + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA2, 0xE0, + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, 0xB3, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, 0x85, 0x97, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + // Bytes 4600 - 463f + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x49, 0xE0, + 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, + 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + // Bytes 4640 - 467f + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB1, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4680 - 46bf + 0x85, 0xAF, 
0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x83, + 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, 0xCC, 0x86, + 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, 0x83, 0x41, + 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, 0x8A, 0xCD, + 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, 0x43, 0xCC, + // Bytes 46c0 - 46ff + 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, 0xCD, 0x83, + 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, 0xCC, 0xA3, + 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, 0x83, 0x49, + 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, 0xA3, 0xB9, + 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, 0x4F, 0xCC, + 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, 0xCD, 0x83, + 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, 0xCC, 0x88, + 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, 0x83, 0x4F, + // Bytes 4700 - 473f + 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0xA8, 0xA9, + 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, 0x53, 0xCC, + 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, 0xCD, 0x83, + 0x53, 0xCC, 0xA3, 0xB9, 0x83, 0x55, 0xCC, 0x83, + 0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, 0x83, 0x55, + 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, 0x9B, 0xB1, + 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, 0x61, 0xCC, + 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, 0xCD, 0x83, + // Bytes 4740 - 477f + 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, 0xCC, 0x8A, + 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, 0x83, 0x63, + 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, 0x82, 0xCD, + 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, 0x65, 0xCC, + 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, 0xA9, 0x83, + 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, 0xCC, 0xA3, + 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, 0x83, 0x6F, + 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, 0x84, 0xCD, + // Bytes 4780 - 47bf + 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, 0x6F, 0xCC, + 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, 0xB1, 0x83, + 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0xA8, + 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, 0x83, 0x73, + 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, 0x8C, 0xCD, + 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, 0x75, 0xCC, + 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, 0xCD, 0x83, + 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, 0xCC, 0x9B, + // Bytes 47c0 - 47ff + 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x95, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA5, + // Bytes 4800 - 483f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4840 - 487f + 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x93, + // Bytes 4880 - 48bf + 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, 0xCF, 0x89, + 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 
0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x91, + // Bytes 48c0 - 48ff + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + // Bytes 4940 - 497f + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + // Bytes 4980 - 49bf + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, 0xCC, 0x80, + 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, 0x33, 0x42, + 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, 0x85, 0xA1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA5, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, 0xE1, + // Bytes 4a00 - 4a3f + 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA9, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAD, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, 0x00, + // Bytes 4a40 - 4a7f + 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB5, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB0, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB4, + // Bytes 4a80 - 4abf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, 0x00, + 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x33, 0x43, + 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, 0xE3, 0x82, + 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, 0x00, 0x01, +} 
+ +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10680 bytes (10.43 KiB). Checksum: a555db76d4becdd2. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 
0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, + // Block 0x5, offset 0x140 + 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 
0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35b8, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3738, 0x2c1: 0x3744, 0x2c3: 0x3732, + 0x2c6: 0xa000, 0x2c7: 0x3720, + 0x2cc: 0x3774, 0x2cd: 0x375c, 0x2ce: 0x3786, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3768, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37ec, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x374a, 0x302: 0x37ce, + 0x310: 0x3726, 0x311: 0x37aa, + 0x312: 0x372c, 0x313: 0x37b0, 0x316: 0x373e, 0x317: 0x37c2, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3840, 0x31b: 0x3846, 0x31c: 0x3750, 0x31d: 0x37d4, + 0x31e: 0x3756, 0x31f: 0x37da, 0x322: 0x3762, 0x323: 0x37e6, + 0x324: 0x376e, 0x325: 0x37f2, 0x326: 0x377a, 0x327: 0x37fe, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x384c, 0x32b: 0x3852, 0x32c: 0x37a4, 0x32d: 0x3828, 0x32e: 0x3780, 0x32f: 0x3804, + 0x330: 0x378c, 0x331: 0x3810, 0x332: 0x3792, 0x333: 0x3816, 0x334: 0x3798, 0x335: 0x381c, + 0x338: 0x379e, 0x339: 0x3822, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, 
offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d33, 0x407: 0xa000, 0x408: 0x2d3b, 0x409: 0xa000, 0x40a: 0x2d43, 0x40b: 0xa000, + 0x40c: 0x2d4b, 0x40d: 0xa000, 0x40e: 0x2d53, 0x411: 0xa000, + 0x412: 0x2d5b, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d63, + 0x43c: 0xa000, 0x43d: 0x2d6b, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x2fae, 0x481: 0x32ba, 0x482: 0x2fb8, 0x483: 0x32c4, 0x484: 0x2fbd, 0x485: 0x32c9, + 0x486: 0x2fc2, 0x487: 0x32ce, 0x488: 0x38e3, 0x489: 0x3a72, 0x48a: 0x2fdb, 0x48b: 0x32e7, + 0x48c: 0x2fe5, 0x48d: 0x32f1, 0x48e: 0x2ff4, 0x48f: 0x3300, 0x490: 0x2fea, 0x491: 0x32f6, + 0x492: 0x2fef, 0x493: 0x32fb, 0x494: 0x3906, 0x495: 0x3a95, 0x496: 0x390d, 0x497: 0x3a9c, + 0x498: 0x3030, 0x499: 0x333c, 0x49a: 0x3035, 0x49b: 0x3341, 0x49c: 0x391b, 0x49d: 0x3aaa, + 0x49e: 0x303a, 0x49f: 0x3346, 0x4a0: 0x3049, 0x4a1: 0x3355, 0x4a2: 0x3067, 0x4a3: 0x3373, + 0x4a4: 0x3076, 0x4a5: 0x3382, 0x4a6: 0x306c, 0x4a7: 0x3378, 0x4a8: 0x307b, 0x4a9: 0x3387, + 0x4aa: 0x3080, 0x4ab: 0x338c, 0x4ac: 0x30c6, 0x4ad: 0x33d2, 0x4ae: 0x3922, 0x4af: 0x3ab1, + 0x4b0: 0x30d0, 0x4b1: 0x33e1, 0x4b2: 0x30da, 0x4b3: 0x33eb, 0x4b4: 0x30e4, 0x4b5: 0x33f5, + 0x4b6: 0x46db, 0x4b7: 0x476c, 0x4b8: 0x3929, 0x4b9: 0x3ab8, 0x4ba: 0x30fd, 0x4bb: 0x340e, + 0x4bc: 0x30f8, 0x4bd: 0x3409, 0x4be: 0x3102, 0x4bf: 0x3413, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3107, 0x4c1: 0x3418, 0x4c2: 0x310c, 0x4c3: 0x341d, 0x4c4: 0x3120, 
0x4c5: 0x3431, + 0x4c6: 0x312a, 0x4c7: 0x343b, 0x4c8: 0x3139, 0x4c9: 0x344a, 0x4ca: 0x3134, 0x4cb: 0x3445, + 0x4cc: 0x394c, 0x4cd: 0x3adb, 0x4ce: 0x395a, 0x4cf: 0x3ae9, 0x4d0: 0x3961, 0x4d1: 0x3af0, + 0x4d2: 0x3968, 0x4d3: 0x3af7, 0x4d4: 0x3166, 0x4d5: 0x3477, 0x4d6: 0x316b, 0x4d7: 0x347c, + 0x4d8: 0x3175, 0x4d9: 0x3486, 0x4da: 0x4708, 0x4db: 0x4799, 0x4dc: 0x39ae, 0x4dd: 0x3b3d, + 0x4de: 0x318e, 0x4df: 0x349f, 0x4e0: 0x3198, 0x4e1: 0x34a9, 0x4e2: 0x4717, 0x4e3: 0x47a8, + 0x4e4: 0x39b5, 0x4e5: 0x3b44, 0x4e6: 0x39bc, 0x4e7: 0x3b4b, 0x4e8: 0x39c3, 0x4e9: 0x3b52, + 0x4ea: 0x31a7, 0x4eb: 0x34b8, 0x4ec: 0x31b1, 0x4ed: 0x34c7, 0x4ee: 0x31c5, 0x4ef: 0x34db, + 0x4f0: 0x31c0, 0x4f1: 0x34d6, 0x4f2: 0x3201, 0x4f3: 0x3517, 0x4f4: 0x3210, 0x4f5: 0x3526, + 0x4f6: 0x320b, 0x4f7: 0x3521, 0x4f8: 0x39ca, 0x4f9: 0x3b59, 0x4fa: 0x39d1, 0x4fb: 0x3b60, + 0x4fc: 0x3215, 0x4fd: 0x352b, 0x4fe: 0x321a, 0x4ff: 0x3530, + // Block 0x14, offset 0x500 + 0x500: 0x321f, 0x501: 0x3535, 0x502: 0x3224, 0x503: 0x353a, 0x504: 0x3233, 0x505: 0x3549, + 0x506: 0x322e, 0x507: 0x3544, 0x508: 0x3238, 0x509: 0x3553, 0x50a: 0x323d, 0x50b: 0x3558, + 0x50c: 0x3242, 0x50d: 0x355d, 0x50e: 0x3260, 0x50f: 0x357b, 0x510: 0x3279, 0x511: 0x3599, + 0x512: 0x3288, 0x513: 0x35a8, 0x514: 0x328d, 0x515: 0x35ad, 0x516: 0x3391, 0x517: 0x34bd, + 0x518: 0x354e, 0x519: 0x358a, 0x51b: 0x35e8, + 0x520: 0x46b8, 0x521: 0x4749, 0x522: 0x2f9a, 0x523: 0x32a6, + 0x524: 0x388f, 0x525: 0x3a1e, 0x526: 0x3888, 0x527: 0x3a17, 0x528: 0x389d, 0x529: 0x3a2c, + 0x52a: 0x3896, 0x52b: 0x3a25, 0x52c: 0x38d5, 0x52d: 0x3a64, 0x52e: 0x38ab, 0x52f: 0x3a3a, + 0x530: 0x38a4, 0x531: 0x3a33, 0x532: 0x38b9, 0x533: 0x3a48, 0x534: 0x38b2, 0x535: 0x3a41, + 0x536: 0x38dc, 0x537: 0x3a6b, 0x538: 0x46cc, 0x539: 0x475d, 0x53a: 0x3017, 0x53b: 0x3323, + 0x53c: 0x3003, 0x53d: 0x330f, 0x53e: 0x38f1, 0x53f: 0x3a80, + // Block 0x15, offset 0x540 + 0x540: 0x38ea, 0x541: 0x3a79, 0x542: 0x38ff, 0x543: 0x3a8e, 0x544: 0x38f8, 0x545: 0x3a87, + 0x546: 0x3914, 0x547: 0x3aa3, 0x548: 0x30a8, 0x549: 0x33b4, 0x54a: 0x30bc, 0x54b: 0x33c8, + 0x54c: 0x46fe, 0x54d: 0x478f, 0x54e: 0x314d, 0x54f: 0x345e, 0x550: 0x3937, 0x551: 0x3ac6, + 0x552: 0x3930, 0x553: 0x3abf, 0x554: 0x3945, 0x555: 0x3ad4, 0x556: 0x393e, 0x557: 0x3acd, + 0x558: 0x39a0, 0x559: 0x3b2f, 0x55a: 0x3984, 0x55b: 0x3b13, 0x55c: 0x397d, 0x55d: 0x3b0c, + 0x55e: 0x3992, 0x55f: 0x3b21, 0x560: 0x398b, 0x561: 0x3b1a, 0x562: 0x3999, 0x563: 0x3b28, + 0x564: 0x31fc, 0x565: 0x3512, 0x566: 0x31de, 0x567: 0x34f4, 0x568: 0x39fb, 0x569: 0x3b8a, + 0x56a: 0x39f4, 0x56b: 0x3b83, 0x56c: 0x3a09, 0x56d: 0x3b98, 0x56e: 0x3a02, 0x56f: 0x3b91, + 0x570: 0x3a10, 0x571: 0x3b9f, 0x572: 0x3247, 0x573: 0x3562, 0x574: 0x326f, 0x575: 0x358f, + 0x576: 0x326a, 0x577: 0x3585, 0x578: 0x3256, 0x579: 0x3571, + // Block 0x16, offset 0x580 + 0x580: 0x481b, 0x581: 0x4821, 0x582: 0x4935, 0x583: 0x494d, 0x584: 0x493d, 0x585: 0x4955, + 0x586: 0x4945, 0x587: 0x495d, 0x588: 0x47c1, 0x589: 0x47c7, 0x58a: 0x48a5, 0x58b: 0x48bd, + 0x58c: 0x48ad, 0x58d: 0x48c5, 0x58e: 0x48b5, 0x58f: 0x48cd, 0x590: 0x482d, 0x591: 0x4833, + 0x592: 0x3dcf, 0x593: 0x3ddf, 0x594: 0x3dd7, 0x595: 0x3de7, + 0x598: 0x47cd, 0x599: 0x47d3, 0x59a: 0x3cff, 0x59b: 0x3d0f, 0x59c: 0x3d07, 0x59d: 0x3d17, + 0x5a0: 0x4845, 0x5a1: 0x484b, 0x5a2: 0x4965, 0x5a3: 0x497d, + 0x5a4: 0x496d, 0x5a5: 0x4985, 0x5a6: 0x4975, 0x5a7: 0x498d, 0x5a8: 0x47d9, 0x5a9: 0x47df, + 0x5aa: 0x48d5, 0x5ab: 0x48ed, 0x5ac: 0x48dd, 0x5ad: 0x48f5, 0x5ae: 0x48e5, 0x5af: 0x48fd, + 0x5b0: 0x485d, 0x5b1: 0x4863, 0x5b2: 0x3e2f, 0x5b3: 0x3e47, 0x5b4: 0x3e37, 0x5b5: 
0x3e4f, + 0x5b6: 0x3e3f, 0x5b7: 0x3e57, 0x5b8: 0x47e5, 0x5b9: 0x47eb, 0x5ba: 0x3d2f, 0x5bb: 0x3d47, + 0x5bc: 0x3d37, 0x5bd: 0x3d4f, 0x5be: 0x3d3f, 0x5bf: 0x3d57, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4869, 0x5c1: 0x486f, 0x5c2: 0x3e5f, 0x5c3: 0x3e6f, 0x5c4: 0x3e67, 0x5c5: 0x3e77, + 0x5c8: 0x47f1, 0x5c9: 0x47f7, 0x5ca: 0x3d5f, 0x5cb: 0x3d6f, + 0x5cc: 0x3d67, 0x5cd: 0x3d77, 0x5d0: 0x487b, 0x5d1: 0x4881, + 0x5d2: 0x3e97, 0x5d3: 0x3eaf, 0x5d4: 0x3e9f, 0x5d5: 0x3eb7, 0x5d6: 0x3ea7, 0x5d7: 0x3ebf, + 0x5d9: 0x47fd, 0x5db: 0x3d7f, 0x5dd: 0x3d87, + 0x5df: 0x3d8f, 0x5e0: 0x4893, 0x5e1: 0x4899, 0x5e2: 0x4995, 0x5e3: 0x49ad, + 0x5e4: 0x499d, 0x5e5: 0x49b5, 0x5e6: 0x49a5, 0x5e7: 0x49bd, 0x5e8: 0x4803, 0x5e9: 0x4809, + 0x5ea: 0x4905, 0x5eb: 0x491d, 0x5ec: 0x490d, 0x5ed: 0x4925, 0x5ee: 0x4915, 0x5ef: 0x492d, + 0x5f0: 0x480f, 0x5f1: 0x4335, 0x5f2: 0x36a8, 0x5f3: 0x433b, 0x5f4: 0x4839, 0x5f5: 0x4341, + 0x5f6: 0x36ba, 0x5f7: 0x4347, 0x5f8: 0x36d8, 0x5f9: 0x434d, 0x5fa: 0x36f0, 0x5fb: 0x4353, + 0x5fc: 0x4887, 0x5fd: 0x4359, + // Block 0x18, offset 0x600 + 0x600: 0x3db7, 0x601: 0x3dbf, 0x602: 0x419b, 0x603: 0x41b9, 0x604: 0x41a5, 0x605: 0x41c3, + 0x606: 0x41af, 0x607: 0x41cd, 0x608: 0x3cef, 0x609: 0x3cf7, 0x60a: 0x40e7, 0x60b: 0x4105, + 0x60c: 0x40f1, 0x60d: 0x410f, 0x60e: 0x40fb, 0x60f: 0x4119, 0x610: 0x3dff, 0x611: 0x3e07, + 0x612: 0x41d7, 0x613: 0x41f5, 0x614: 0x41e1, 0x615: 0x41ff, 0x616: 0x41eb, 0x617: 0x4209, + 0x618: 0x3d1f, 0x619: 0x3d27, 0x61a: 0x4123, 0x61b: 0x4141, 0x61c: 0x412d, 0x61d: 0x414b, + 0x61e: 0x4137, 0x61f: 0x4155, 0x620: 0x3ed7, 0x621: 0x3edf, 0x622: 0x4213, 0x623: 0x4231, + 0x624: 0x421d, 0x625: 0x423b, 0x626: 0x4227, 0x627: 0x4245, 0x628: 0x3d97, 0x629: 0x3d9f, + 0x62a: 0x415f, 0x62b: 0x417d, 0x62c: 0x4169, 0x62d: 0x4187, 0x62e: 0x4173, 0x62f: 0x4191, + 0x630: 0x369c, 0x631: 0x3696, 0x632: 0x3da7, 0x633: 0x36a2, 0x634: 0x3daf, + 0x636: 0x4827, 0x637: 0x3dc7, 0x638: 0x360c, 0x639: 0x3606, 0x63a: 0x35fa, 0x63b: 0x4305, + 0x63c: 0x3612, 0x63d: 0x8100, 0x63e: 0x01d6, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35be, 0x642: 0x3def, 0x643: 0x36b4, 0x644: 0x3df7, + 0x646: 0x4851, 0x647: 0x3e0f, 0x648: 0x3618, 0x649: 0x430b, 0x64a: 0x3624, 0x64b: 0x4311, + 0x64c: 0x3630, 0x64d: 0x3ba6, 0x64e: 0x3bad, 0x64f: 0x3bb4, 0x650: 0x36cc, 0x651: 0x36c6, + 0x652: 0x3e17, 0x653: 0x44fb, 0x656: 0x36d2, 0x657: 0x3e27, + 0x658: 0x3648, 0x659: 0x3642, 0x65a: 0x3636, 0x65b: 0x4317, 0x65d: 0x3bbb, + 0x65e: 0x3bc2, 0x65f: 0x3bc9, 0x660: 0x3702, 0x661: 0x36fc, 0x662: 0x3e7f, 0x663: 0x4503, + 0x664: 0x36e4, 0x665: 0x36ea, 0x666: 0x3708, 0x667: 0x3e8f, 0x668: 0x3678, 0x669: 0x3672, + 0x66a: 0x3666, 0x66b: 0x4323, 0x66c: 0x3660, 0x66d: 0x35b2, 0x66e: 0x42ff, 0x66f: 0x0081, + 0x672: 0x3ec7, 0x673: 0x370e, 0x674: 0x3ecf, + 0x676: 0x489f, 0x677: 0x3ee7, 0x678: 0x3654, 0x679: 0x431d, 0x67a: 0x3684, 0x67b: 0x432f, + 0x67c: 0x3690, 0x67d: 0x426d, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c1d, 0x683: 0xa000, 0x684: 0x3c24, 0x685: 0xa000, + 0x687: 0x3c2b, 0x688: 0xa000, 0x689: 0x3c32, + 0x68d: 0xa000, + 0x6a0: 0x2f7c, 0x6a1: 0xa000, 0x6a2: 0x3c40, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c39, 0x6ae: 0x2f77, 0x6af: 0x2f81, + 0x6b0: 0x3c47, 0x6b1: 0x3c4e, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c55, 0x6b5: 0x3c5c, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c63, 0x6b9: 0x3c6a, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c71, 0x6c1: 0x3c78, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c8d, 0x6c5: 0x3c94, + 0x6c6: 0xa000, 
0x6c7: 0xa000, 0x6c8: 0x3c9b, 0x6c9: 0x3ca2, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3cb7, 0x6ed: 0x3cbe, 0x6ee: 0x3cc5, 0x6ef: 0x3ccc, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f1f, 0x70d: 0xa000, 0x70e: 0x3f27, 0x70f: 0xa000, 0x710: 0x3f2f, 0x711: 0xa000, + 0x712: 0x3f37, 0x713: 0xa000, 0x714: 0x3f3f, 0x715: 0xa000, 0x716: 0x3f47, 0x717: 0xa000, + 0x718: 0x3f4f, 0x719: 0xa000, 0x71a: 0x3f57, 0x71b: 0xa000, 0x71c: 0x3f5f, 0x71d: 0xa000, + 0x71e: 0x3f67, 0x71f: 0xa000, 0x720: 0x3f6f, 0x721: 0xa000, 0x722: 0x3f77, + 0x724: 0xa000, 0x725: 0x3f7f, 0x726: 0xa000, 0x727: 0x3f87, 0x728: 0xa000, 0x729: 0x3f8f, + 0x72f: 0xa000, + 0x730: 0x3f97, 0x731: 0x3f9f, 0x732: 0xa000, 0x733: 0x3fa7, 0x734: 0x3faf, 0x735: 0xa000, + 0x736: 0x3fb7, 0x737: 0x3fbf, 0x738: 0xa000, 0x739: 0x3fc7, 0x73a: 0x3fcf, 0x73b: 0xa000, + 0x73c: 0x3fd7, 0x73d: 0x3fdf, + // Block 0x1d, offset 0x740 + 0x754: 0x3f17, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fe7, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3ff7, 0x76d: 0xa000, 0x76e: 0x3fff, 0x76f: 0xa000, + 0x770: 0x4007, 0x771: 0xa000, 0x772: 0x400f, 0x773: 0xa000, 0x774: 0x4017, 0x775: 0xa000, + 0x776: 0x401f, 0x777: 0xa000, 0x778: 0x4027, 0x779: 0xa000, 0x77a: 0x402f, 0x77b: 0xa000, + 0x77c: 0x4037, 0x77d: 0xa000, 0x77e: 0x403f, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4047, 0x781: 0xa000, 0x782: 0x404f, 0x784: 0xa000, 0x785: 0x4057, + 0x786: 0xa000, 0x787: 0x405f, 0x788: 0xa000, 0x789: 0x4067, + 0x78f: 0xa000, 0x790: 0x406f, 0x791: 0x4077, + 0x792: 0xa000, 0x793: 0x407f, 0x794: 0x4087, 0x795: 0xa000, 0x796: 0x408f, 0x797: 0x4097, + 0x798: 0xa000, 0x799: 0x409f, 0x79a: 0x40a7, 0x79b: 0xa000, 0x79c: 0x40af, 0x79d: 0x40b7, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fef, + 0x7b7: 0x40bf, 0x7b8: 0x40c7, 0x7b9: 0x40cf, 0x7ba: 0x40d7, + 0x7bd: 0xa000, 0x7be: 0x40df, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x137a, 0x7c1: 0x0cfe, 0x7c2: 0x13d6, 0x7c3: 0x13a2, 0x7c4: 0x0e5a, 0x7c5: 0x06ee, + 0x7c6: 0x08e2, 0x7c7: 0x162e, 0x7c8: 0x162e, 0x7c9: 0x0a0e, 0x7ca: 0x1462, 0x7cb: 0x0946, + 0x7cc: 0x0a0a, 0x7cd: 0x0bf2, 0x7ce: 0x0fd2, 0x7cf: 0x1162, 0x7d0: 0x129a, 0x7d1: 0x12d6, + 0x7d2: 0x130a, 0x7d3: 0x141e, 0x7d4: 0x0d76, 0x7d5: 0x0e02, 0x7d6: 0x0eae, 0x7d7: 0x0f46, + 0x7d8: 0x1262, 0x7d9: 0x144a, 0x7da: 0x1576, 0x7db: 0x0712, 0x7dc: 0x08b6, 0x7dd: 0x0d8a, + 0x7de: 0x0ed2, 0x7df: 0x1296, 0x7e0: 0x15c6, 0x7e1: 0x0ab6, 0x7e2: 0x0e7a, 0x7e3: 0x1286, + 0x7e4: 0x131a, 0x7e5: 0x0c26, 0x7e6: 0x11be, 0x7e7: 0x12e2, 0x7e8: 0x0b22, 0x7e9: 0x0d12, + 0x7ea: 0x0e1a, 0x7eb: 0x0f1e, 0x7ec: 0x142a, 0x7ed: 0x0752, 0x7ee: 0x07ea, 0x7ef: 0x0856, + 0x7f0: 0x0c8e, 0x7f1: 0x0d82, 0x7f2: 0x0ece, 0x7f3: 0x0ff2, 0x7f4: 0x117a, 0x7f5: 0x128e, + 0x7f6: 0x12a6, 0x7f7: 0x13ca, 0x7f8: 0x14f2, 0x7f9: 0x15a6, 0x7fa: 0x15c2, 0x7fb: 0x102e, + 0x7fc: 0x106e, 0x7fd: 0x1126, 0x7fe: 0x1246, 0x7ff: 0x147e, + // Block 0x20, offset 0x800 + 0x800: 0x15ce, 0x801: 0x134e, 0x802: 0x09ca, 0x803: 0x0b3e, 0x804: 0x10de, 0x805: 0x119e, + 0x806: 0x0f02, 0x807: 0x1036, 0x808: 0x139a, 0x809: 0x14ea, 0x80a: 0x09c6, 0x80b: 0x0a92, + 0x80c: 0x0d7a, 0x80d: 0x0e2e, 0x80e: 0x0e62, 0x80f: 0x1116, 0x810: 0x113e, 0x811: 0x14aa, + 0x812: 0x0852, 0x813: 0x11aa, 0x814: 0x07f6, 0x815: 0x07f2, 0x816: 0x109a, 0x817: 0x112a, + 0x818: 0x125e, 0x819: 0x14b2, 0x81a: 0x136a, 0x81b: 0x0c2a, 0x81c: 0x0d76, 0x81d: 
0x135a, + 0x81e: 0x06fa, 0x81f: 0x0a66, 0x820: 0x0b96, 0x821: 0x0f32, 0x822: 0x0fb2, 0x823: 0x0876, + 0x824: 0x103e, 0x825: 0x0762, 0x826: 0x0b7a, 0x827: 0x06da, 0x828: 0x0dee, 0x829: 0x0ca6, + 0x82a: 0x1112, 0x82b: 0x08ca, 0x82c: 0x09b6, 0x82d: 0x0ffe, 0x82e: 0x1266, 0x82f: 0x133e, + 0x830: 0x0dba, 0x831: 0x13fa, 0x832: 0x0de6, 0x833: 0x0c3a, 0x834: 0x121e, 0x835: 0x0c5a, + 0x836: 0x0fae, 0x837: 0x072e, 0x838: 0x07aa, 0x839: 0x07ee, 0x83a: 0x0d56, 0x83b: 0x10fe, + 0x83c: 0x11f6, 0x83d: 0x134a, 0x83e: 0x145e, 0x83f: 0x085e, + // Block 0x21, offset 0x840 + 0x840: 0x0912, 0x841: 0x0a1a, 0x842: 0x0b32, 0x843: 0x0cc2, 0x844: 0x0e7e, 0x845: 0x1042, + 0x846: 0x149a, 0x847: 0x157e, 0x848: 0x15d2, 0x849: 0x15ea, 0x84a: 0x083a, 0x84b: 0x0cf6, + 0x84c: 0x0da6, 0x84d: 0x13ee, 0x84e: 0x0afe, 0x84f: 0x0bda, 0x850: 0x0bf6, 0x851: 0x0c86, + 0x852: 0x0e6e, 0x853: 0x0eba, 0x854: 0x0f6a, 0x855: 0x108e, 0x856: 0x1132, 0x857: 0x1196, + 0x858: 0x13de, 0x859: 0x126e, 0x85a: 0x1406, 0x85b: 0x1482, 0x85c: 0x0812, 0x85d: 0x083e, + 0x85e: 0x0926, 0x85f: 0x0eaa, 0x860: 0x12f6, 0x861: 0x133e, 0x862: 0x0b1e, 0x863: 0x0b8e, + 0x864: 0x0c52, 0x865: 0x0db2, 0x866: 0x10da, 0x867: 0x0f26, 0x868: 0x073e, 0x869: 0x0982, + 0x86a: 0x0a66, 0x86b: 0x0aca, 0x86c: 0x0b9a, 0x86d: 0x0f42, 0x86e: 0x0f5e, 0x86f: 0x116e, + 0x870: 0x118e, 0x871: 0x1466, 0x872: 0x14e6, 0x873: 0x14f6, 0x874: 0x1532, 0x875: 0x0756, + 0x876: 0x1082, 0x877: 0x1452, 0x878: 0x14ce, 0x879: 0x0bb2, 0x87a: 0x071a, 0x87b: 0x077a, + 0x87c: 0x0a6a, 0x87d: 0x0a8a, 0x87e: 0x0cb2, 0x87f: 0x0d76, + // Block 0x22, offset 0x880 + 0x880: 0x0ec6, 0x881: 0x0fce, 0x882: 0x127a, 0x883: 0x141a, 0x884: 0x1626, 0x885: 0x0ce6, + 0x886: 0x14a6, 0x887: 0x0836, 0x888: 0x0d32, 0x889: 0x0d3e, 0x88a: 0x0e12, 0x88b: 0x0e4a, + 0x88c: 0x0f4e, 0x88d: 0x0faa, 0x88e: 0x102a, 0x88f: 0x110e, 0x890: 0x153e, 0x891: 0x07b2, + 0x892: 0x0c06, 0x893: 0x14b6, 0x894: 0x076a, 0x895: 0x0aae, 0x896: 0x0e32, 0x897: 0x13e2, + 0x898: 0x0b6a, 0x899: 0x0bba, 0x89a: 0x0d46, 0x89b: 0x0f32, 0x89c: 0x14be, 0x89d: 0x081a, + 0x89e: 0x0902, 0x89f: 0x0a9a, 0x8a0: 0x0cd6, 0x8a1: 0x0d22, 0x8a2: 0x0d62, 0x8a3: 0x0df6, + 0x8a4: 0x0f4a, 0x8a5: 0x0fbe, 0x8a6: 0x115a, 0x8a7: 0x12fa, 0x8a8: 0x1306, 0x8a9: 0x145a, + 0x8aa: 0x14da, 0x8ab: 0x0886, 0x8ac: 0x0e4e, 0x8ad: 0x0906, 0x8ae: 0x0eca, 0x8af: 0x0f6e, + 0x8b0: 0x128a, 0x8b1: 0x14c2, 0x8b2: 0x15ae, 0x8b3: 0x15d6, 0x8b4: 0x0d3a, 0x8b5: 0x0e2a, + 0x8b6: 0x11c6, 0x8b7: 0x10ba, 0x8b8: 0x10c6, 0x8b9: 0x10ea, 0x8ba: 0x0f1a, 0x8bb: 0x0ea2, + 0x8bc: 0x1366, 0x8bd: 0x0736, 0x8be: 0x122e, 0x8bf: 0x081e, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080e, 0x8c1: 0x0b0e, 0x8c2: 0x0c2e, 0x8c3: 0x10f6, 0x8c4: 0x0a56, 0x8c5: 0x0e06, + 0x8c6: 0x0cf2, 0x8c7: 0x13ea, 0x8c8: 0x12ea, 0x8c9: 0x14ae, 0x8ca: 0x1326, 0x8cb: 0x0b2a, + 0x8cc: 0x078a, 0x8cd: 0x095e, 0x8d0: 0x09b2, + 0x8d2: 0x0ce2, 0x8d5: 0x07fa, 0x8d6: 0x0f22, 0x8d7: 0x0fe6, + 0x8d8: 0x104a, 0x8d9: 0x1066, 0x8da: 0x106a, 0x8db: 0x107e, 0x8dc: 0x14fe, 0x8dd: 0x10ee, + 0x8de: 0x1172, 0x8e0: 0x1292, 0x8e2: 0x1356, + 0x8e5: 0x140a, 0x8e6: 0x1436, + 0x8ea: 0x1552, 0x8eb: 0x1556, 0x8ec: 0x155a, 0x8ed: 0x15be, 0x8ee: 0x142e, 0x8ef: 0x14ca, + 0x8f0: 0x075a, 0x8f1: 0x077e, 0x8f2: 0x0792, 0x8f3: 0x084e, 0x8f4: 0x085a, 0x8f5: 0x089a, + 0x8f6: 0x094e, 0x8f7: 0x096a, 0x8f8: 0x0972, 0x8f9: 0x09ae, 0x8fa: 0x09ba, 0x8fb: 0x0a96, + 0x8fc: 0x0a9e, 0x8fd: 0x0ba6, 0x8fe: 0x0bce, 0x8ff: 0x0bd6, + // Block 0x24, offset 0x900 + 0x900: 0x0bee, 0x901: 0x0c9a, 0x902: 0x0cca, 0x903: 0x0cea, 0x904: 0x0d5a, 0x905: 0x0e1e, + 0x906: 0x0e3a, 0x907: 0x0e6a, 0x908: 0x0ebe, 
0x909: 0x0ede, 0x90a: 0x0f52, 0x90b: 0x1032, + 0x90c: 0x104e, 0x90d: 0x1056, 0x90e: 0x1052, 0x90f: 0x105a, 0x910: 0x105e, 0x911: 0x1062, + 0x912: 0x1076, 0x913: 0x107a, 0x914: 0x109e, 0x915: 0x10b2, 0x916: 0x10ce, 0x917: 0x1132, + 0x918: 0x113a, 0x919: 0x1142, 0x91a: 0x1156, 0x91b: 0x117e, 0x91c: 0x11ce, 0x91d: 0x1202, + 0x91e: 0x1202, 0x91f: 0x126a, 0x920: 0x1312, 0x921: 0x132a, 0x922: 0x135e, 0x923: 0x1362, + 0x924: 0x13a6, 0x925: 0x13aa, 0x926: 0x1402, 0x927: 0x140a, 0x928: 0x14de, 0x929: 0x1522, + 0x92a: 0x153a, 0x92b: 0x0b9e, 0x92c: 0x1721, 0x92d: 0x11e6, + 0x930: 0x06e2, 0x931: 0x07e6, 0x932: 0x07a6, 0x933: 0x074e, 0x934: 0x078e, 0x935: 0x07ba, + 0x936: 0x084a, 0x937: 0x0866, 0x938: 0x094e, 0x939: 0x093a, 0x93a: 0x094a, 0x93b: 0x0966, + 0x93c: 0x09b2, 0x93d: 0x09c2, 0x93e: 0x0a06, 0x93f: 0x0a12, + // Block 0x25, offset 0x940 + 0x940: 0x0a2e, 0x941: 0x0a3e, 0x942: 0x0b26, 0x943: 0x0b2e, 0x944: 0x0b5e, 0x945: 0x0b7e, + 0x946: 0x0bae, 0x947: 0x0bc6, 0x948: 0x0bb6, 0x949: 0x0bd6, 0x94a: 0x0bca, 0x94b: 0x0bee, + 0x94c: 0x0c0a, 0x94d: 0x0c62, 0x94e: 0x0c6e, 0x94f: 0x0c76, 0x950: 0x0c9e, 0x951: 0x0ce2, + 0x952: 0x0d12, 0x953: 0x0d16, 0x954: 0x0d2a, 0x955: 0x0daa, 0x956: 0x0dba, 0x957: 0x0e12, + 0x958: 0x0e5e, 0x959: 0x0e56, 0x95a: 0x0e6a, 0x95b: 0x0e86, 0x95c: 0x0ebe, 0x95d: 0x1016, + 0x95e: 0x0ee2, 0x95f: 0x0f16, 0x960: 0x0f22, 0x961: 0x0f62, 0x962: 0x0f7e, 0x963: 0x0fa2, + 0x964: 0x0fc6, 0x965: 0x0fca, 0x966: 0x0fe6, 0x967: 0x0fea, 0x968: 0x0ffa, 0x969: 0x100e, + 0x96a: 0x100a, 0x96b: 0x103a, 0x96c: 0x10b6, 0x96d: 0x10ce, 0x96e: 0x10e6, 0x96f: 0x111e, + 0x970: 0x1132, 0x971: 0x114e, 0x972: 0x117e, 0x973: 0x1232, 0x974: 0x125a, 0x975: 0x12ce, + 0x976: 0x1316, 0x977: 0x1322, 0x978: 0x132a, 0x979: 0x1342, 0x97a: 0x1356, 0x97b: 0x1346, + 0x97c: 0x135e, 0x97d: 0x135a, 0x97e: 0x1352, 0x97f: 0x1362, + // Block 0x26, offset 0x980 + 0x980: 0x136e, 0x981: 0x13aa, 0x982: 0x13e6, 0x983: 0x1416, 0x984: 0x144e, 0x985: 0x146e, + 0x986: 0x14ba, 0x987: 0x14de, 0x988: 0x14fe, 0x989: 0x1512, 0x98a: 0x1522, 0x98b: 0x152e, + 0x98c: 0x153a, 0x98d: 0x158e, 0x98e: 0x162e, 0x98f: 0x16b8, 0x990: 0x16b3, 0x991: 0x16e5, + 0x992: 0x060a, 0x993: 0x0632, 0x994: 0x0636, 0x995: 0x1767, 0x996: 0x1794, 0x997: 0x180c, + 0x998: 0x161a, 0x999: 0x162a, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fe, 0x9c1: 0x06f6, 0x9c2: 0x0706, 0x9c3: 0x164a, 0x9c4: 0x074a, 0x9c5: 0x075a, + 0x9c6: 0x075e, 0x9c7: 0x0766, 0x9c8: 0x076e, 0x9c9: 0x0772, 0x9ca: 0x077e, 0x9cb: 0x0776, + 0x9cc: 0x05b6, 0x9cd: 0x165e, 0x9ce: 0x0792, 0x9cf: 0x0796, 0x9d0: 0x079a, 0x9d1: 0x07b6, + 0x9d2: 0x164f, 0x9d3: 0x05ba, 0x9d4: 0x07a2, 0x9d5: 0x07c2, 0x9d6: 0x1659, 0x9d7: 0x07d2, + 0x9d8: 0x07da, 0x9d9: 0x073a, 0x9da: 0x07e2, 0x9db: 0x07e6, 0x9dc: 0x1834, 0x9dd: 0x0802, + 0x9de: 0x080a, 0x9df: 0x05c2, 0x9e0: 0x0822, 0x9e1: 0x0826, 0x9e2: 0x082e, 0x9e3: 0x0832, + 0x9e4: 0x05c6, 0x9e5: 0x084a, 0x9e6: 0x084e, 0x9e7: 0x085a, 0x9e8: 0x0866, 0x9e9: 0x086a, + 0x9ea: 0x086e, 0x9eb: 0x0876, 0x9ec: 0x0896, 0x9ed: 0x089a, 0x9ee: 0x08a2, 0x9ef: 0x08b2, + 0x9f0: 0x08ba, 0x9f1: 0x08be, 0x9f2: 0x08be, 0x9f3: 0x08be, 0x9f4: 0x166d, 0x9f5: 0x0e96, + 0x9f6: 0x08d2, 0x9f7: 0x08da, 0x9f8: 0x1672, 0x9f9: 0x08e6, 0x9fa: 0x08ee, 0x9fb: 0x08f6, + 0x9fc: 0x091e, 0x9fd: 0x090a, 0x9fe: 0x0916, 0x9ff: 0x091a, + // Block 0x28, offset 0xa00 + 0xa00: 0x0922, 0xa01: 0x092a, 0xa02: 0x092e, 0xa03: 0x0936, 0xa04: 0x093e, 0xa05: 0x0942, + 0xa06: 0x0942, 0xa07: 0x094a, 0xa08: 0x0952, 0xa09: 0x0956, 0xa0a: 0x0962, 0xa0b: 0x0986, + 0xa0c: 0x096a, 0xa0d: 0x098a, 0xa0e: 0x096e, 0xa0f: 0x0976, 0xa10: 
0x080e, 0xa11: 0x09d2, + 0xa12: 0x099a, 0xa13: 0x099e, 0xa14: 0x09a2, 0xa15: 0x0996, 0xa16: 0x09aa, 0xa17: 0x09a6, + 0xa18: 0x09be, 0xa19: 0x1677, 0xa1a: 0x09da, 0xa1b: 0x09de, 0xa1c: 0x09e6, 0xa1d: 0x09f2, + 0xa1e: 0x09fa, 0xa1f: 0x0a16, 0xa20: 0x167c, 0xa21: 0x1681, 0xa22: 0x0a22, 0xa23: 0x0a26, + 0xa24: 0x0a2a, 0xa25: 0x0a1e, 0xa26: 0x0a32, 0xa27: 0x05ca, 0xa28: 0x05ce, 0xa29: 0x0a3a, + 0xa2a: 0x0a42, 0xa2b: 0x0a42, 0xa2c: 0x1686, 0xa2d: 0x0a5e, 0xa2e: 0x0a62, 0xa2f: 0x0a66, + 0xa30: 0x0a6e, 0xa31: 0x168b, 0xa32: 0x0a76, 0xa33: 0x0a7a, 0xa34: 0x0b52, 0xa35: 0x0a82, + 0xa36: 0x05d2, 0xa37: 0x0a8e, 0xa38: 0x0a9e, 0xa39: 0x0aaa, 0xa3a: 0x0aa6, 0xa3b: 0x1695, + 0xa3c: 0x0ab2, 0xa3d: 0x169a, 0xa3e: 0x0abe, 0xa3f: 0x0aba, + // Block 0x29, offset 0xa40 + 0xa40: 0x0ac2, 0xa41: 0x0ad2, 0xa42: 0x0ad6, 0xa43: 0x05d6, 0xa44: 0x0ae6, 0xa45: 0x0aee, + 0xa46: 0x0af2, 0xa47: 0x0af6, 0xa48: 0x05da, 0xa49: 0x169f, 0xa4a: 0x05de, 0xa4b: 0x0b12, + 0xa4c: 0x0b16, 0xa4d: 0x0b1a, 0xa4e: 0x0b22, 0xa4f: 0x1866, 0xa50: 0x0b3a, 0xa51: 0x16a9, + 0xa52: 0x16a9, 0xa53: 0x11da, 0xa54: 0x0b4a, 0xa55: 0x0b4a, 0xa56: 0x05e2, 0xa57: 0x16cc, + 0xa58: 0x179e, 0xa59: 0x0b5a, 0xa5a: 0x0b62, 0xa5b: 0x05e6, 0xa5c: 0x0b76, 0xa5d: 0x0b86, + 0xa5e: 0x0b8a, 0xa5f: 0x0b92, 0xa60: 0x0ba2, 0xa61: 0x05ee, 0xa62: 0x05ea, 0xa63: 0x0ba6, + 0xa64: 0x16ae, 0xa65: 0x0baa, 0xa66: 0x0bbe, 0xa67: 0x0bc2, 0xa68: 0x0bc6, 0xa69: 0x0bc2, + 0xa6a: 0x0bd2, 0xa6b: 0x0bd6, 0xa6c: 0x0be6, 0xa6d: 0x0bde, 0xa6e: 0x0be2, 0xa6f: 0x0bea, + 0xa70: 0x0bee, 0xa71: 0x0bf2, 0xa72: 0x0bfe, 0xa73: 0x0c02, 0xa74: 0x0c1a, 0xa75: 0x0c22, + 0xa76: 0x0c32, 0xa77: 0x0c46, 0xa78: 0x16bd, 0xa79: 0x0c42, 0xa7a: 0x0c36, 0xa7b: 0x0c4e, + 0xa7c: 0x0c56, 0xa7d: 0x0c6a, 0xa7e: 0x16c2, 0xa7f: 0x0c72, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c66, 0xa81: 0x0c5e, 0xa82: 0x05f2, 0xa83: 0x0c7a, 0xa84: 0x0c82, 0xa85: 0x0c8a, + 0xa86: 0x0c7e, 0xa87: 0x05f6, 0xa88: 0x0c9a, 0xa89: 0x0ca2, 0xa8a: 0x16c7, 0xa8b: 0x0cce, + 0xa8c: 0x0d02, 0xa8d: 0x0cde, 0xa8e: 0x0602, 0xa8f: 0x0cea, 0xa90: 0x05fe, 0xa91: 0x05fa, + 0xa92: 0x07c6, 0xa93: 0x07ca, 0xa94: 0x0d06, 0xa95: 0x0cee, 0xa96: 0x11ae, 0xa97: 0x0666, + 0xa98: 0x0d12, 0xa99: 0x0d16, 0xa9a: 0x0d1a, 0xa9b: 0x0d2e, 0xa9c: 0x0d26, 0xa9d: 0x16e0, + 0xa9e: 0x0606, 0xa9f: 0x0d42, 0xaa0: 0x0d36, 0xaa1: 0x0d52, 0xaa2: 0x0d5a, 0xaa3: 0x16ea, + 0xaa4: 0x0d5e, 0xaa5: 0x0d4a, 0xaa6: 0x0d66, 0xaa7: 0x060a, 0xaa8: 0x0d6a, 0xaa9: 0x0d6e, + 0xaaa: 0x0d72, 0xaab: 0x0d7e, 0xaac: 0x16ef, 0xaad: 0x0d86, 0xaae: 0x060e, 0xaaf: 0x0d92, + 0xab0: 0x16f4, 0xab1: 0x0d96, 0xab2: 0x0612, 0xab3: 0x0da2, 0xab4: 0x0dae, 0xab5: 0x0dba, + 0xab6: 0x0dbe, 0xab7: 0x16f9, 0xab8: 0x1690, 0xab9: 0x16fe, 0xaba: 0x0dde, 0xabb: 0x1703, + 0xabc: 0x0dea, 0xabd: 0x0df2, 0xabe: 0x0de2, 0xabf: 0x0dfe, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0e, 0xac1: 0x0e1e, 0xac2: 0x0e12, 0xac3: 0x0e16, 0xac4: 0x0e22, 0xac5: 0x0e26, + 0xac6: 0x1708, 0xac7: 0x0e0a, 0xac8: 0x0e3e, 0xac9: 0x0e42, 0xaca: 0x0616, 0xacb: 0x0e56, + 0xacc: 0x0e52, 0xacd: 0x170d, 0xace: 0x0e36, 0xacf: 0x0e72, 0xad0: 0x1712, 0xad1: 0x1717, + 0xad2: 0x0e76, 0xad3: 0x0e8a, 0xad4: 0x0e86, 0xad5: 0x0e82, 0xad6: 0x061a, 0xad7: 0x0e8e, + 0xad8: 0x0e9e, 0xad9: 0x0e9a, 0xada: 0x0ea6, 0xadb: 0x1654, 0xadc: 0x0eb6, 0xadd: 0x171c, + 0xade: 0x0ec2, 0xadf: 0x1726, 0xae0: 0x0ed6, 0xae1: 0x0ee2, 0xae2: 0x0ef6, 0xae3: 0x172b, + 0xae4: 0x0f0a, 0xae5: 0x0f0e, 0xae6: 0x1730, 0xae7: 0x1735, 0xae8: 0x0f2a, 0xae9: 0x0f3a, + 0xaea: 0x061e, 0xaeb: 0x0f3e, 0xaec: 0x0622, 0xaed: 0x0622, 0xaee: 0x0f56, 0xaef: 0x0f5a, + 0xaf0: 0x0f62, 0xaf1: 0x0f66, 
0xaf2: 0x0f72, 0xaf3: 0x0626, 0xaf4: 0x0f8a, 0xaf5: 0x173a, + 0xaf6: 0x0fa6, 0xaf7: 0x173f, 0xaf8: 0x0fb2, 0xaf9: 0x16a4, 0xafa: 0x0fc2, 0xafb: 0x1744, + 0xafc: 0x1749, 0xafd: 0x174e, 0xafe: 0x062a, 0xaff: 0x062e, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ffa, 0xb01: 0x1758, 0xb02: 0x1753, 0xb03: 0x175d, 0xb04: 0x1762, 0xb05: 0x1002, + 0xb06: 0x1006, 0xb07: 0x1006, 0xb08: 0x100e, 0xb09: 0x0636, 0xb0a: 0x1012, 0xb0b: 0x063a, + 0xb0c: 0x063e, 0xb0d: 0x176c, 0xb0e: 0x1026, 0xb0f: 0x102e, 0xb10: 0x103a, 0xb11: 0x0642, + 0xb12: 0x1771, 0xb13: 0x105e, 0xb14: 0x1776, 0xb15: 0x177b, 0xb16: 0x107e, 0xb17: 0x1096, + 0xb18: 0x0646, 0xb19: 0x109e, 0xb1a: 0x10a2, 0xb1b: 0x10a6, 0xb1c: 0x1780, 0xb1d: 0x1785, + 0xb1e: 0x1785, 0xb1f: 0x10be, 0xb20: 0x064a, 0xb21: 0x178a, 0xb22: 0x10d2, 0xb23: 0x10d6, + 0xb24: 0x064e, 0xb25: 0x178f, 0xb26: 0x10f2, 0xb27: 0x0652, 0xb28: 0x1102, 0xb29: 0x10fa, + 0xb2a: 0x110a, 0xb2b: 0x1799, 0xb2c: 0x1122, 0xb2d: 0x0656, 0xb2e: 0x112e, 0xb2f: 0x1136, + 0xb30: 0x1146, 0xb31: 0x065a, 0xb32: 0x17a3, 0xb33: 0x17a8, 0xb34: 0x065e, 0xb35: 0x17ad, + 0xb36: 0x115e, 0xb37: 0x17b2, 0xb38: 0x116a, 0xb39: 0x1176, 0xb3a: 0x117e, 0xb3b: 0x17b7, + 0xb3c: 0x17bc, 0xb3d: 0x1192, 0xb3e: 0x17c1, 0xb3f: 0x119a, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16d1, 0xb41: 0x0662, 0xb42: 0x11b2, 0xb43: 0x11b6, 0xb44: 0x066a, 0xb45: 0x11ba, + 0xb46: 0x0a36, 0xb47: 0x17c6, 0xb48: 0x17cb, 0xb49: 0x16d6, 0xb4a: 0x16db, 0xb4b: 0x11da, + 0xb4c: 0x11de, 0xb4d: 0x13f6, 0xb4e: 0x066e, 0xb4f: 0x120a, 0xb50: 0x1206, 0xb51: 0x120e, + 0xb52: 0x0842, 0xb53: 0x1212, 0xb54: 0x1216, 0xb55: 0x121a, 0xb56: 0x1222, 0xb57: 0x17d0, + 0xb58: 0x121e, 0xb59: 0x1226, 0xb5a: 0x123a, 0xb5b: 0x123e, 0xb5c: 0x122a, 0xb5d: 0x1242, + 0xb5e: 0x1256, 0xb5f: 0x126a, 0xb60: 0x1236, 0xb61: 0x124a, 0xb62: 0x124e, 0xb63: 0x1252, + 0xb64: 0x17d5, 0xb65: 0x17df, 0xb66: 0x17da, 0xb67: 0x0672, 0xb68: 0x1272, 0xb69: 0x1276, + 0xb6a: 0x127e, 0xb6b: 0x17f3, 0xb6c: 0x1282, 0xb6d: 0x17e4, 0xb6e: 0x0676, 0xb6f: 0x067a, + 0xb70: 0x17e9, 0xb71: 0x17ee, 0xb72: 0x067e, 0xb73: 0x12a2, 0xb74: 0x12a6, 0xb75: 0x12aa, + 0xb76: 0x12ae, 0xb77: 0x12ba, 0xb78: 0x12b6, 0xb79: 0x12c2, 0xb7a: 0x12be, 0xb7b: 0x12ce, + 0xb7c: 0x12c6, 0xb7d: 0x12ca, 0xb7e: 0x12d2, 0xb7f: 0x0682, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12da, 0xb81: 0x12de, 0xb82: 0x0686, 0xb83: 0x12ee, 0xb84: 0x12f2, 0xb85: 0x17f8, + 0xb86: 0x12fe, 0xb87: 0x1302, 0xb88: 0x068a, 0xb89: 0x130e, 0xb8a: 0x05be, 0xb8b: 0x17fd, + 0xb8c: 0x1802, 0xb8d: 0x068e, 0xb8e: 0x0692, 0xb8f: 0x133a, 0xb90: 0x1352, 0xb91: 0x136e, + 0xb92: 0x137e, 0xb93: 0x1807, 0xb94: 0x1392, 0xb95: 0x1396, 0xb96: 0x13ae, 0xb97: 0x13ba, + 0xb98: 0x1811, 0xb99: 0x1663, 0xb9a: 0x13c6, 0xb9b: 0x13c2, 0xb9c: 0x13ce, 0xb9d: 0x1668, + 0xb9e: 0x13da, 0xb9f: 0x13e6, 0xba0: 0x1816, 0xba1: 0x181b, 0xba2: 0x1426, 0xba3: 0x1432, + 0xba4: 0x143a, 0xba5: 0x1820, 0xba6: 0x143e, 0xba7: 0x146a, 0xba8: 0x1476, 0xba9: 0x147a, + 0xbaa: 0x1472, 0xbab: 0x1486, 0xbac: 0x148a, 0xbad: 0x1825, 0xbae: 0x1496, 0xbaf: 0x0696, + 0xbb0: 0x149e, 0xbb1: 0x182a, 0xbb2: 0x069a, 0xbb3: 0x14d6, 0xbb4: 0x0ac6, 0xbb5: 0x14ee, + 0xbb6: 0x182f, 0xbb7: 0x1839, 0xbb8: 0x069e, 0xbb9: 0x06a2, 0xbba: 0x1516, 0xbbb: 0x183e, + 0xbbc: 0x06a6, 0xbbd: 0x1843, 0xbbe: 0x152e, 0xbbf: 0x152e, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1536, 0xbc1: 0x1848, 0xbc2: 0x154e, 0xbc3: 0x06aa, 0xbc4: 0x155e, 0xbc5: 0x156a, + 0xbc6: 0x1572, 0xbc7: 0x157a, 0xbc8: 0x06ae, 0xbc9: 0x184d, 0xbca: 0x158e, 0xbcb: 0x15aa, + 0xbcc: 0x15b6, 0xbcd: 0x06b2, 0xbce: 0x06b6, 0xbcf: 0x15ba, 0xbd0: 0x1852, 0xbd1: 
0x06ba, + 0xbd2: 0x1857, 0xbd3: 0x185c, 0xbd4: 0x1861, 0xbd5: 0x15de, 0xbd6: 0x06be, 0xbd7: 0x15f2, + 0xbd8: 0x15fa, 0xbd9: 0x15fe, 0xbda: 0x1606, 0xbdb: 0x160e, 0xbdc: 0x1616, 0xbdd: 0x186b, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16b: 0x64, 0x16c: 0x0e, 0x16d: 0x65, 0x16e: 0x66, 0x16f: 0x67, + 0x170: 0x68, 0x173: 0x69, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6a, 0x183: 0x6b, 0x184: 0x6c, 0x186: 0x6d, 0x187: 0x6e, + 0x188: 0x6f, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x70, 0x18c: 0x71, + 0x1ab: 0x72, + 0x1b3: 0x73, 0x1b5: 0x74, 0x1b7: 0x75, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x76, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x77, 0x1c5: 0x78, + 0x1c9: 0x79, 0x1cc: 0x7a, 0x1cd: 0x7b, + // Block 0x8, offset 0x200 + 0x219: 0x7c, 0x21a: 0x7d, 0x21b: 0x7e, + 0x220: 0x7f, 0x223: 0x80, 0x224: 0x81, 0x225: 0x82, 0x226: 0x83, 0x227: 0x84, + 0x22a: 0x85, 0x22b: 0x86, 0x22f: 0x87, + 0x230: 0x88, 0x231: 0x89, 0x232: 0x8a, 0x233: 0x8b, 0x234: 0x8c, 0x235: 0x8d, 0x236: 0x8e, 0x237: 0x88, + 0x238: 0x89, 0x239: 0x8a, 0x23a: 0x8b, 0x23b: 0x8c, 0x23c: 0x8d, 0x23d: 0x8e, 0x23e: 0x88, 0x23f: 0x89, + // Block 0x9, offset 0x240 + 0x240: 0x8a, 0x241: 0x8b, 0x242: 0x8c, 0x243: 0x8d, 0x244: 0x8e, 0x245: 0x88, 0x246: 0x89, 0x247: 0x8a, + 0x248: 0x8b, 0x249: 0x8c, 0x24a: 0x8d, 0x24b: 0x8e, 0x24c: 0x88, 0x24d: 0x89, 0x24e: 0x8a, 0x24f: 0x8b, + 0x250: 0x8c, 0x251: 0x8d, 0x252: 0x8e, 0x253: 0x88, 0x254: 0x89, 0x255: 0x8a, 0x256: 0x8b, 0x257: 0x8c, + 0x258: 0x8d, 0x259: 0x8e, 0x25a: 0x88, 0x25b: 0x89, 0x25c: 0x8a, 0x25d: 0x8b, 0x25e: 0x8c, 0x25f: 0x8d, + 0x260: 0x8e, 0x261: 0x88, 0x262: 0x89, 0x263: 0x8a, 0x264: 0x8b, 0x265: 0x8c, 0x266: 0x8d, 0x267: 0x8e, + 0x268: 0x88, 0x269: 0x89, 0x26a: 0x8a, 0x26b: 0x8b, 0x26c: 0x8c, 0x26d: 0x8d, 0x26e: 0x8e, 0x26f: 0x88, + 0x270: 0x89, 0x271: 0x8a, 0x272: 0x8b, 0x273: 0x8c, 0x274: 0x8d, 0x275: 0x8e, 0x276: 0x88, 0x277: 0x89, + 0x278: 0x8a, 0x279: 0x8b, 0x27a: 0x8c, 0x27b: 0x8d, 0x27c: 0x8e, 0x27d: 0x88, 0x27e: 0x89, 0x27f: 0x8a, + // Block 0xa, offset 0x280 + 0x280: 0x8b, 0x281: 0x8c, 0x282: 0x8d, 0x283: 0x8e, 0x284: 0x88, 0x285: 0x89, 0x286: 0x8a, 0x287: 0x8b, + 0x288: 0x8c, 0x289: 0x8d, 0x28a: 0x8e, 0x28b: 0x88, 0x28c: 0x89, 
0x28d: 0x8a, 0x28e: 0x8b, 0x28f: 0x8c, + 0x290: 0x8d, 0x291: 0x8e, 0x292: 0x88, 0x293: 0x89, 0x294: 0x8a, 0x295: 0x8b, 0x296: 0x8c, 0x297: 0x8d, + 0x298: 0x8e, 0x299: 0x88, 0x29a: 0x89, 0x29b: 0x8a, 0x29c: 0x8b, 0x29d: 0x8c, 0x29e: 0x8d, 0x29f: 0x8e, + 0x2a0: 0x88, 0x2a1: 0x89, 0x2a2: 0x8a, 0x2a3: 0x8b, 0x2a4: 0x8c, 0x2a5: 0x8d, 0x2a6: 0x8e, 0x2a7: 0x88, + 0x2a8: 0x89, 0x2a9: 0x8a, 0x2aa: 0x8b, 0x2ab: 0x8c, 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x88, 0x2af: 0x89, + 0x2b0: 0x8a, 0x2b1: 0x8b, 0x2b2: 0x8c, 0x2b3: 0x8d, 0x2b4: 0x8e, 0x2b5: 0x88, 0x2b6: 0x89, 0x2b7: 0x8a, + 0x2b8: 0x8b, 0x2b9: 0x8c, 0x2ba: 0x8d, 0x2bb: 0x8e, 0x2bc: 0x88, 0x2bd: 0x89, 0x2be: 0x8a, 0x2bf: 0x8b, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8c, 0x2c1: 0x8d, 0x2c2: 0x8e, 0x2c3: 0x88, 0x2c4: 0x89, 0x2c5: 0x8a, 0x2c6: 0x8b, 0x2c7: 0x8c, + 0x2c8: 0x8d, 0x2c9: 0x8e, 0x2ca: 0x88, 0x2cb: 0x89, 0x2cc: 0x8a, 0x2cd: 0x8b, 0x2ce: 0x8c, 0x2cf: 0x8d, + 0x2d0: 0x8e, 0x2d1: 0x88, 0x2d2: 0x89, 0x2d3: 0x8a, 0x2d4: 0x8b, 0x2d5: 0x8c, 0x2d6: 0x8d, 0x2d7: 0x8e, + 0x2d8: 0x88, 0x2d9: 0x89, 0x2da: 0x8a, 0x2db: 0x8b, 0x2dc: 0x8c, 0x2dd: 0x8d, 0x2de: 0x8f, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x90, 0x32d: 0x91, 0x32e: 0x92, + 0x331: 0x93, 0x332: 0x94, 0x333: 0x95, 0x334: 0x96, + 0x338: 0x97, 0x339: 0x98, 0x33a: 0x99, 0x33b: 0x9a, 0x33e: 0x9b, 0x33f: 0x9c, + // Block 0xd, offset 0x340 + 0x347: 0x9d, + 0x34b: 0x9e, 0x34d: 0x9f, + 0x368: 0xa0, 0x36b: 0xa1, + 0x374: 0xa2, + 0x37a: 0xa3, 0x37d: 0xa4, + // Block 0xe, offset 0x380 + 0x381: 0xa5, 0x382: 0xa6, 0x384: 0xa7, 0x385: 0x83, 0x387: 0xa8, + 0x388: 0xa9, 0x38b: 0xaa, 0x38c: 0xab, 0x38d: 0xac, + 0x391: 0xad, 0x392: 0xae, 0x393: 0xaf, 0x396: 0xb0, 0x397: 0xb1, + 0x398: 0x74, 0x39a: 0xb2, 0x39c: 0xb3, + 0x3a0: 0xb4, 0x3a4: 0xb5, 0x3a5: 0xb6, 0x3a7: 0xb7, + 0x3a8: 0xb8, 0x3a9: 0xb9, 0x3aa: 0xba, + 0x3b0: 0x74, 0x3b5: 0xbb, 0x3b6: 0xbc, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xbd, 0x3ec: 0xbe, + 0x3ff: 0xbf, + // Block 0x10, offset 0x400 + 0x432: 0xc0, + // Block 0x11, offset 0x440 + 0x445: 0xc1, 0x446: 0xc2, 0x447: 0xc3, + 0x449: 0xc4, + // Block 0x12, offset 0x480 + 0x480: 0xc5, 0x484: 0xbe, + 0x48b: 0xc6, + 0x4a3: 0xc7, 0x4a5: 0xc8, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc9, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 156 entries, 312 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x120, 0x122, 0x12b, 0x12d, 0x130, 0x132, 0x13d, 0x141, 0x14f, 0x152, 0x158, 0x15e, 0x169, 0x16d, 0x16f, 0x171, 0x173, 0x175, 0x177, 0x17d, 0x181, 0x183, 0x185, 0x18d, 0x191, 0x194, 0x196, 0x198, 0x19b, 0x19e, 0x1a0, 0x1a2, 0x1a4, 0x1a6, 0x1ac, 0x1af, 0x1b1, 0x1b8, 0x1be, 0x1c4, 0x1cc, 0x1d2, 0x1d8, 0x1de, 0x1e2, 0x1f0, 0x1f9, 0x1fc, 0x1ff, 0x201, 0x204, 0x206, 0x20a, 0x20f, 0x211, 0x213, 0x218, 0x21e, 0x220, 0x222, 0x224, 0x22a, 0x22d, 0x22f, 0x231, 0x237, 0x23a, 0x242, 0x249, 0x24c, 0x24f, 0x251, 0x254, 0x25c, 0x260, 0x267, 0x26a, 
0x270, 0x272, 0x275, 0x277, 0x27a, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x289, 0x28c, 0x28e, 0x290, 0x292, 0x294, 0x296, 0x2a3, 0x2ad, 0x2af, 0x2b1, 0x2b7, 0x2b9, 0x2bb, 0x2be} + +// nfcSparseValues: 704 entries, 2816 bytes +var nfcSparseValues = [704]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4857, lo: 0x8a, hi: 0x8a}, + {value: 0x4875, lo: 0x8b, hi: 0x8b}, + {value: 0x36de, lo: 0x8c, hi: 0x8c}, + {value: 0x36f6, lo: 0x8d, hi: 0x8d}, + {value: 0x488d, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3714, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, 
lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 
0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xd0 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd2 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd7 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe8 + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf4 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf6 + {value: 0x0000, lo: 0x05}, + 
{value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfc + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfe + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x108 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x110 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x113 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x116 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x120 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x122 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x12d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x130 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x132 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x13d + {value: 0x0004, lo: 0x03}, + {value: 0x0436, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x141 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, 
lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x3e, offset 0x14f + {value: 0x4292, lo: 0x02}, + {value: 0x01bb, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3f, offset 0x152 + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x40, offset 0x158 + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x41, offset 0x15e + {value: 0x63f1, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x42, offset 0x169 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x43, offset 0x16d + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x44, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x45, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x46, offset 0x173 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x47, offset 0x175 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x03}, + {value: 0x4ab6, lo: 0xb3, hi: 0xb3}, + {value: 0x4ab6, lo: 0xb5, hi: 0xb6}, + {value: 0x4ab6, lo: 0xba, hi: 0xbf}, + // Block 0x4a, offset 0x181 + {value: 0x0000, lo: 0x01}, + {value: 0x4ab6, lo: 0x8f, hi: 0xa3}, + // Block 0x4b, offset 0x183 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4c, offset 0x185 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4e, offset 0x191 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x4f, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x50, offset 0x196 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x51, offset 0x198 + 
{value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x52, offset 0x19b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x53, offset 0x19e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x54, offset 0x1a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x55, offset 0x1a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x56, offset 0x1a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x57, offset 0x1a6 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x58, offset 0x1ac + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x59, offset 0x1af + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5a, offset 0x1b1 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5b, offset 0x1b8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5c, offset 0x1be + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5d, offset 0x1c4 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5e, offset 0x1cc + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5f, offset 0x1d2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x60, offset 0x1d8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x61, offset 0x1de + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x62, offset 0x1e2 + {value: 0x0006, lo: 0x0d}, + {value: 0x43a7, lo: 0x9d, hi: 0x9d}, + {value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4419, lo: 0x9f, hi: 0x9f}, + {value: 0x4407, lo: 0xaa, hi: 0xab}, + {value: 0x450b, lo: 0xac, hi: 0xac}, + {value: 0x4513, lo: 0xad, hi: 0xad}, + {value: 0x435f, lo: 0xae, hi: 0xb1}, + {value: 0x437d, lo: 0xb2, 
hi: 0xb4}, + {value: 0x4395, lo: 0xb5, hi: 0xb6}, + {value: 0x43a1, lo: 0xb8, hi: 0xb8}, + {value: 0x43ad, lo: 0xb9, hi: 0xbb}, + {value: 0x43c5, lo: 0xbc, hi: 0xbc}, + {value: 0x43cb, lo: 0xbe, hi: 0xbe}, + // Block 0x63, offset 0x1f0 + {value: 0x0006, lo: 0x08}, + {value: 0x43d1, lo: 0x80, hi: 0x81}, + {value: 0x43dd, lo: 0x83, hi: 0x84}, + {value: 0x43ef, lo: 0x86, hi: 0x89}, + {value: 0x4413, lo: 0x8a, hi: 0x8a}, + {value: 0x438f, lo: 0x8b, hi: 0x8b}, + {value: 0x4377, lo: 0x8c, hi: 0x8c}, + {value: 0x43bf, lo: 0x8d, hi: 0x8d}, + {value: 0x43e9, lo: 0x8e, hi: 0x8e}, + // Block 0x64, offset 0x1f9 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x65, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x66, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x67, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x68, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x69, offset 0x206 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6b, offset 0x20f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6c, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x4ab6, lo: 0x9e, hi: 0x9f}, + {value: 0x4ab6, lo: 0xa3, hi: 0xa3}, + {value: 0x4ab6, lo: 0xa5, hi: 0xa6}, + {value: 0x4ab6, lo: 0xaa, hi: 0xaf}, + // Block 0x6e, offset 0x218 + {value: 0x0000, lo: 0x05}, + {value: 0x4ab6, lo: 0x82, hi: 0x87}, + {value: 0x4ab6, lo: 0x8a, hi: 0x8f}, + {value: 0x4ab6, lo: 0x92, hi: 0x97}, + {value: 0x4ab6, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6f, offset 0x21e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x70, offset 0x220 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x71, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x72, offset 0x224 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x22a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x74, offset 0x22d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x75, offset 0x22f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x76, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x77, offset 0x237 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x78, offset 0x23a + {value: 
0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x79, offset 0x242 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7a, offset 0x249 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7b, offset 0x24c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7c, offset 0x24f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7d, offset 0x251 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7e, offset 0x254 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7f, offset 0x25c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x80, offset 0x260 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x267 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x82, offset 0x26a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x83, offset 0x270 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x84, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x27a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x27f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x281 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x283 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x287 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x289 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 
0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x28c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x28e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x90, offset 0x290 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x91, offset 0x292 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x92, offset 0x294 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x93, offset 0x296 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x94, offset 0x2a3 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x95, offset 0x2ad + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x96, offset 0x2af + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x97, offset 0x2b1 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x98, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x99, offset 0x2b9 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0x9a, offset 0x2bb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x9b, offset 0x2be + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 18768 bytes (18.33 KiB). Checksum: c51186dd2412943d. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. +var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x132: 0x1960, 0x133: 0x19ed, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 
0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, 0x13f: 0x1bb2, + // Block 0x5, offset 0x140 + 0x140: 0x1c3a, 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, 0x149: 0x1c62, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2e05, 0x185: 0x2e0b, + 0x186: 0x2e11, 0x187: 0x1975, 0x188: 0x1978, 0x189: 0x1a0e, 0x18a: 0x198d, 0x18b: 0x1990, + 0x18c: 0x1a44, 0x18d: 0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b1: 0x1945, 0x1b2: 0x1948, 0x1b3: 0x19d5, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 
0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x42bc, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x4271, 0x285: 0x4492, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4857, 0x2cb: 0x4875, + 0x2cc: 0x36de, 0x2cd: 0x36f6, 0x2ce: 0x488d, 0x2d0: 0x01c1, 0x2d1: 0x01d3, + 0x2d2: 0x01af, 0x2d3: 0x4323, 0x2d4: 0x4329, 0x2d5: 0x01fd, 0x2d6: 0x01eb, + 0x2f0: 0x01d9, 0x2f1: 0x01ee, 0x2f2: 0x01f1, 0x2f4: 0x018b, 0x2f5: 0x01ca, + 0x2f9: 0x01a9, + // Block 0xc, offset 0x300 + 0x300: 0x3738, 0x301: 0x3744, 0x303: 0x3732, + 0x306: 0xa000, 0x307: 0x3720, + 0x30c: 0x3774, 0x30d: 0x375c, 0x30e: 0x3786, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3768, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37ec, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x374a, 0x342: 0x37ce, + 0x350: 0x3726, 0x351: 0x37aa, + 0x352: 0x372c, 0x353: 0x37b0, 0x356: 0x373e, 0x357: 0x37c2, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3840, 0x35b: 0x3846, 0x35c: 0x3750, 0x35d: 0x37d4, + 0x35e: 0x3756, 0x35f: 0x37da, 0x362: 0x3762, 0x363: 0x37e6, + 0x364: 0x376e, 0x365: 0x37f2, 0x366: 0x377a, 0x367: 0x37fe, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x384c, 0x36b: 0x3852, 0x36c: 0x37a4, 0x36d: 0x3828, 0x36e: 0x3780, 0x36f: 0x3804, + 0x370: 0x378c, 0x371: 0x3810, 0x372: 0x3792, 0x373: 0x3816, 0x374: 0x3798, 0x375: 0x381c, + 0x378: 0x379e, 0x379: 0x3822, + // Block 0xe, offset 0x380 + 0x387: 0x1d67, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 
0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1d8a, + 0x3f6: 0x2019, 0x3f7: 0x2055, 0x3f8: 0x2050, + // Block 0x10, offset 0x400 + 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d33, 0x447: 0xa000, 0x448: 0x2d3b, 0x449: 0xa000, 0x44a: 0x2d43, 0x44b: 0xa000, + 0x44c: 0x2d4b, 0x44d: 0xa000, 0x44e: 0x2d53, 0x451: 0xa000, + 0x452: 0x2d5b, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d63, + 0x47c: 0xa000, 0x47d: 0x2d6b, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0416, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x041a, 0x495: 0x041e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0426, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x042a, 0x49d: 0x01c1, + 0x49e: 0x01c4, 0x49f: 0x01c7, 0x4a0: 0x01fd, 0x4a1: 0x0200, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01c1, 0x4a7: 0x01c4, 0x4a8: 0x01ee, 0x4a9: 0x01fd, + 0x4aa: 0x0200, + 0x4b8: 0x020f, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042e, 0x4e8: 0x016d, 0x4e9: 0x0128, + 0x4ea: 0x0432, 0x4eb: 0x0170, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x0422, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x0161, 0x4fd: 0x0164, 0x4fe: 0x0167, 0x4ff: 0x01d3, + // Block 0x14, offset 0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 
0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x2fae, 0x541: 0x32ba, 0x542: 0x2fb8, 0x543: 0x32c4, 0x544: 0x2fbd, 0x545: 0x32c9, + 0x546: 0x2fc2, 0x547: 0x32ce, 0x548: 0x38e3, 0x549: 0x3a72, 0x54a: 0x2fdb, 0x54b: 0x32e7, + 0x54c: 0x2fe5, 0x54d: 0x32f1, 0x54e: 0x2ff4, 0x54f: 0x3300, 0x550: 0x2fea, 0x551: 0x32f6, + 0x552: 0x2fef, 0x553: 0x32fb, 0x554: 0x3906, 0x555: 0x3a95, 0x556: 0x390d, 0x557: 0x3a9c, + 0x558: 0x3030, 0x559: 0x333c, 0x55a: 0x3035, 0x55b: 0x3341, 0x55c: 0x391b, 0x55d: 0x3aaa, + 0x55e: 0x303a, 0x55f: 0x3346, 0x560: 0x3049, 0x561: 0x3355, 0x562: 0x3067, 0x563: 0x3373, + 0x564: 0x3076, 0x565: 0x3382, 0x566: 0x306c, 0x567: 0x3378, 0x568: 0x307b, 0x569: 0x3387, + 0x56a: 0x3080, 0x56b: 0x338c, 0x56c: 0x30c6, 0x56d: 0x33d2, 0x56e: 0x3922, 0x56f: 0x3ab1, + 0x570: 0x30d0, 0x571: 0x33e1, 0x572: 0x30da, 0x573: 0x33eb, 0x574: 0x30e4, 0x575: 0x33f5, + 0x576: 0x46db, 0x577: 0x476c, 0x578: 0x3929, 0x579: 0x3ab8, 0x57a: 0x30fd, 0x57b: 0x340e, + 0x57c: 0x30f8, 0x57d: 0x3409, 0x57e: 0x3102, 0x57f: 0x3413, + // Block 0x16, offset 0x580 + 0x580: 0x3107, 0x581: 0x3418, 0x582: 0x310c, 0x583: 0x341d, 0x584: 0x3120, 0x585: 0x3431, + 0x586: 0x312a, 0x587: 0x343b, 0x588: 0x3139, 0x589: 0x344a, 0x58a: 0x3134, 0x58b: 0x3445, + 0x58c: 0x394c, 0x58d: 0x3adb, 0x58e: 0x395a, 0x58f: 0x3ae9, 0x590: 0x3961, 0x591: 0x3af0, + 0x592: 0x3968, 0x593: 0x3af7, 0x594: 0x3166, 0x595: 0x3477, 0x596: 0x316b, 0x597: 0x347c, + 0x598: 0x3175, 0x599: 0x3486, 0x59a: 0x4708, 0x59b: 0x4799, 0x59c: 0x39ae, 0x59d: 0x3b3d, + 0x59e: 0x318e, 0x59f: 0x349f, 0x5a0: 0x3198, 0x5a1: 0x34a9, 0x5a2: 0x4717, 0x5a3: 0x47a8, + 0x5a4: 0x39b5, 0x5a5: 0x3b44, 0x5a6: 0x39bc, 0x5a7: 0x3b4b, 0x5a8: 0x39c3, 0x5a9: 0x3b52, + 0x5aa: 0x31a7, 0x5ab: 0x34b8, 0x5ac: 0x31b1, 0x5ad: 0x34c7, 0x5ae: 0x31c5, 0x5af: 0x34db, + 0x5b0: 0x31c0, 0x5b1: 0x34d6, 0x5b2: 0x3201, 0x5b3: 0x3517, 0x5b4: 0x3210, 0x5b5: 0x3526, + 0x5b6: 0x320b, 0x5b7: 0x3521, 0x5b8: 0x39ca, 0x5b9: 0x3b59, 0x5ba: 0x39d1, 0x5bb: 0x3b60, + 0x5bc: 0x3215, 0x5bd: 0x352b, 0x5be: 0x321a, 0x5bf: 0x3530, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x321f, 0x5c1: 0x3535, 0x5c2: 0x3224, 0x5c3: 0x353a, 0x5c4: 0x3233, 0x5c5: 0x3549, + 0x5c6: 0x322e, 0x5c7: 0x3544, 0x5c8: 0x3238, 0x5c9: 0x3553, 0x5ca: 0x323d, 0x5cb: 0x3558, + 0x5cc: 0x3242, 0x5cd: 0x355d, 0x5ce: 0x3260, 0x5cf: 0x357b, 0x5d0: 0x3279, 0x5d1: 0x3599, + 0x5d2: 0x3288, 0x5d3: 0x35a8, 0x5d4: 0x328d, 0x5d5: 0x35ad, 0x5d6: 0x3391, 0x5d7: 0x34bd, + 0x5d8: 0x354e, 0x5d9: 0x358a, 0x5da: 0x1be6, 0x5db: 0x42ee, + 0x5e0: 0x46b8, 0x5e1: 0x4749, 0x5e2: 0x2f9a, 0x5e3: 0x32a6, + 0x5e4: 0x388f, 0x5e5: 0x3a1e, 0x5e6: 0x3888, 0x5e7: 0x3a17, 0x5e8: 0x389d, 0x5e9: 0x3a2c, + 0x5ea: 0x3896, 0x5eb: 0x3a25, 0x5ec: 0x38d5, 0x5ed: 0x3a64, 0x5ee: 0x38ab, 0x5ef: 0x3a3a, + 0x5f0: 0x38a4, 0x5f1: 0x3a33, 0x5f2: 0x38b9, 0x5f3: 0x3a48, 0x5f4: 0x38b2, 0x5f5: 0x3a41, + 0x5f6: 0x38dc, 0x5f7: 0x3a6b, 0x5f8: 0x46cc, 0x5f9: 0x475d, 0x5fa: 0x3017, 0x5fb: 0x3323, + 0x5fc: 0x3003, 0x5fd: 0x330f, 0x5fe: 0x38f1, 0x5ff: 0x3a80, + // Block 0x18, offset 0x600 + 0x600: 0x38ea, 0x601: 0x3a79, 0x602: 0x38ff, 0x603: 0x3a8e, 0x604: 0x38f8, 0x605: 0x3a87, + 0x606: 0x3914, 0x607: 0x3aa3, 0x608: 0x30a8, 0x609: 0x33b4, 0x60a: 0x30bc, 0x60b: 0x33c8, + 0x60c: 0x46fe, 0x60d: 0x478f, 0x60e: 0x314d, 0x60f: 0x345e, 0x610: 
0x3937, 0x611: 0x3ac6, + 0x612: 0x3930, 0x613: 0x3abf, 0x614: 0x3945, 0x615: 0x3ad4, 0x616: 0x393e, 0x617: 0x3acd, + 0x618: 0x39a0, 0x619: 0x3b2f, 0x61a: 0x3984, 0x61b: 0x3b13, 0x61c: 0x397d, 0x61d: 0x3b0c, + 0x61e: 0x3992, 0x61f: 0x3b21, 0x620: 0x398b, 0x621: 0x3b1a, 0x622: 0x3999, 0x623: 0x3b28, + 0x624: 0x31fc, 0x625: 0x3512, 0x626: 0x31de, 0x627: 0x34f4, 0x628: 0x39fb, 0x629: 0x3b8a, + 0x62a: 0x39f4, 0x62b: 0x3b83, 0x62c: 0x3a09, 0x62d: 0x3b98, 0x62e: 0x3a02, 0x62f: 0x3b91, + 0x630: 0x3a10, 0x631: 0x3b9f, 0x632: 0x3247, 0x633: 0x3562, 0x634: 0x326f, 0x635: 0x358f, + 0x636: 0x326a, 0x637: 0x3585, 0x638: 0x3256, 0x639: 0x3571, + // Block 0x19, offset 0x640 + 0x640: 0x481b, 0x641: 0x4821, 0x642: 0x4935, 0x643: 0x494d, 0x644: 0x493d, 0x645: 0x4955, + 0x646: 0x4945, 0x647: 0x495d, 0x648: 0x47c1, 0x649: 0x47c7, 0x64a: 0x48a5, 0x64b: 0x48bd, + 0x64c: 0x48ad, 0x64d: 0x48c5, 0x64e: 0x48b5, 0x64f: 0x48cd, 0x650: 0x482d, 0x651: 0x4833, + 0x652: 0x3dcf, 0x653: 0x3ddf, 0x654: 0x3dd7, 0x655: 0x3de7, + 0x658: 0x47cd, 0x659: 0x47d3, 0x65a: 0x3cff, 0x65b: 0x3d0f, 0x65c: 0x3d07, 0x65d: 0x3d17, + 0x660: 0x4845, 0x661: 0x484b, 0x662: 0x4965, 0x663: 0x497d, + 0x664: 0x496d, 0x665: 0x4985, 0x666: 0x4975, 0x667: 0x498d, 0x668: 0x47d9, 0x669: 0x47df, + 0x66a: 0x48d5, 0x66b: 0x48ed, 0x66c: 0x48dd, 0x66d: 0x48f5, 0x66e: 0x48e5, 0x66f: 0x48fd, + 0x670: 0x485d, 0x671: 0x4863, 0x672: 0x3e2f, 0x673: 0x3e47, 0x674: 0x3e37, 0x675: 0x3e4f, + 0x676: 0x3e3f, 0x677: 0x3e57, 0x678: 0x47e5, 0x679: 0x47eb, 0x67a: 0x3d2f, 0x67b: 0x3d47, + 0x67c: 0x3d37, 0x67d: 0x3d4f, 0x67e: 0x3d3f, 0x67f: 0x3d57, + // Block 0x1a, offset 0x680 + 0x680: 0x4869, 0x681: 0x486f, 0x682: 0x3e5f, 0x683: 0x3e6f, 0x684: 0x3e67, 0x685: 0x3e77, + 0x688: 0x47f1, 0x689: 0x47f7, 0x68a: 0x3d5f, 0x68b: 0x3d6f, + 0x68c: 0x3d67, 0x68d: 0x3d77, 0x690: 0x487b, 0x691: 0x4881, + 0x692: 0x3e97, 0x693: 0x3eaf, 0x694: 0x3e9f, 0x695: 0x3eb7, 0x696: 0x3ea7, 0x697: 0x3ebf, + 0x699: 0x47fd, 0x69b: 0x3d7f, 0x69d: 0x3d87, + 0x69f: 0x3d8f, 0x6a0: 0x4893, 0x6a1: 0x4899, 0x6a2: 0x4995, 0x6a3: 0x49ad, + 0x6a4: 0x499d, 0x6a5: 0x49b5, 0x6a6: 0x49a5, 0x6a7: 0x49bd, 0x6a8: 0x4803, 0x6a9: 0x4809, + 0x6aa: 0x4905, 0x6ab: 0x491d, 0x6ac: 0x490d, 0x6ad: 0x4925, 0x6ae: 0x4915, 0x6af: 0x492d, + 0x6b0: 0x480f, 0x6b1: 0x4335, 0x6b2: 0x36a8, 0x6b3: 0x433b, 0x6b4: 0x4839, 0x6b5: 0x4341, + 0x6b6: 0x36ba, 0x6b7: 0x4347, 0x6b8: 0x36d8, 0x6b9: 0x434d, 0x6ba: 0x36f0, 0x6bb: 0x4353, + 0x6bc: 0x4887, 0x6bd: 0x4359, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3db7, 0x6c1: 0x3dbf, 0x6c2: 0x419b, 0x6c3: 0x41b9, 0x6c4: 0x41a5, 0x6c5: 0x41c3, + 0x6c6: 0x41af, 0x6c7: 0x41cd, 0x6c8: 0x3cef, 0x6c9: 0x3cf7, 0x6ca: 0x40e7, 0x6cb: 0x4105, + 0x6cc: 0x40f1, 0x6cd: 0x410f, 0x6ce: 0x40fb, 0x6cf: 0x4119, 0x6d0: 0x3dff, 0x6d1: 0x3e07, + 0x6d2: 0x41d7, 0x6d3: 0x41f5, 0x6d4: 0x41e1, 0x6d5: 0x41ff, 0x6d6: 0x41eb, 0x6d7: 0x4209, + 0x6d8: 0x3d1f, 0x6d9: 0x3d27, 0x6da: 0x4123, 0x6db: 0x4141, 0x6dc: 0x412d, 0x6dd: 0x414b, + 0x6de: 0x4137, 0x6df: 0x4155, 0x6e0: 0x3ed7, 0x6e1: 0x3edf, 0x6e2: 0x4213, 0x6e3: 0x4231, + 0x6e4: 0x421d, 0x6e5: 0x423b, 0x6e6: 0x4227, 0x6e7: 0x4245, 0x6e8: 0x3d97, 0x6e9: 0x3d9f, + 0x6ea: 0x415f, 0x6eb: 0x417d, 0x6ec: 0x4169, 0x6ed: 0x4187, 0x6ee: 0x4173, 0x6ef: 0x4191, + 0x6f0: 0x369c, 0x6f1: 0x3696, 0x6f2: 0x3da7, 0x6f3: 0x36a2, 0x6f4: 0x3daf, + 0x6f6: 0x4827, 0x6f7: 0x3dc7, 0x6f8: 0x360c, 0x6f9: 0x3606, 0x6fa: 0x35fa, 0x6fb: 0x4305, + 0x6fc: 0x3612, 0x6fd: 0x429e, 0x6fe: 0x01d6, 0x6ff: 0x429e, + // Block 0x1c, offset 0x700 + 0x700: 0x42b7, 0x701: 0x4499, 0x702: 0x3def, 0x703: 0x36b4, 0x704: 0x3df7, 
+ 0x706: 0x4851, 0x707: 0x3e0f, 0x708: 0x3618, 0x709: 0x430b, 0x70a: 0x3624, 0x70b: 0x4311, + 0x70c: 0x3630, 0x70d: 0x44a0, 0x70e: 0x44a7, 0x70f: 0x44ae, 0x710: 0x36cc, 0x711: 0x36c6, + 0x712: 0x3e17, 0x713: 0x44fb, 0x716: 0x36d2, 0x717: 0x3e27, + 0x718: 0x3648, 0x719: 0x3642, 0x71a: 0x3636, 0x71b: 0x4317, 0x71d: 0x44b5, + 0x71e: 0x44bc, 0x71f: 0x44c3, 0x720: 0x3702, 0x721: 0x36fc, 0x722: 0x3e7f, 0x723: 0x4503, + 0x724: 0x36e4, 0x725: 0x36ea, 0x726: 0x3708, 0x727: 0x3e8f, 0x728: 0x3678, 0x729: 0x3672, + 0x72a: 0x3666, 0x72b: 0x4323, 0x72c: 0x3660, 0x72d: 0x448b, 0x72e: 0x4492, 0x72f: 0x0081, + 0x732: 0x3ec7, 0x733: 0x370e, 0x734: 0x3ecf, + 0x736: 0x489f, 0x737: 0x3ee7, 0x738: 0x3654, 0x739: 0x431d, 0x73a: 0x3684, 0x73b: 0x432f, + 0x73c: 0x3690, 0x73d: 0x4271, 0x73e: 0x42a3, + // Block 0x1d, offset 0x740 + 0x740: 0x1bde, 0x741: 0x1be2, 0x742: 0x0047, 0x743: 0x1c5a, 0x745: 0x1bee, + 0x746: 0x1bf2, 0x747: 0x00e9, 0x749: 0x1c5e, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1993, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x19a5, 0x761: 0x1bce, 0x762: 0x19ae, + 0x764: 0x0075, 0x766: 0x01bb, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42e9, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0218, + 0x776: 0x021b, 0x777: 0x021e, 0x778: 0x0221, 0x779: 0x0093, 0x77b: 0x1b9e, + 0x77c: 0x01eb, 0x77d: 0x01c4, 0x77e: 0x017c, 0x77f: 0x01a3, + // Block 0x1e, offset 0x780 + 0x780: 0x0466, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x2234, 0x791: 0x2240, + 0x792: 0x22f4, 0x793: 0x221c, 0x794: 0x22a0, 0x795: 0x2228, 0x796: 0x22a6, 0x797: 0x22be, + 0x798: 0x22ca, 0x799: 0x222e, 0x79a: 0x22d0, 0x79b: 0x223a, 0x79c: 0x22c4, 0x79d: 0x22d6, + 0x79e: 0x22dc, 0x79f: 0x1cc2, 0x7a0: 0x0053, 0x7a1: 0x195d, 0x7a2: 0x1baa, 0x7a3: 0x1966, + 0x7a4: 0x006d, 0x7a5: 0x19b1, 0x7a6: 0x1bd6, 0x7a7: 0x1d4e, 0x7a8: 0x1969, 0x7a9: 0x0071, + 0x7aa: 0x19bd, 0x7ab: 0x1bda, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19ea, 0x7b2: 0x1c1e, 0x7b3: 0x19f3, 0x7b4: 0x00ad, 0x7b5: 0x1a68, + 0x7b6: 0x1c52, 0x7b7: 0x1d62, 0x7b8: 0x19f6, 0x7b9: 0x00b1, 0x7ba: 0x1a6b, 0x7bb: 0x1c56, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c1d, 0x7c3: 0xa000, 0x7c4: 0x3c24, 0x7c5: 0xa000, + 0x7c7: 0x3c2b, 0x7c8: 0xa000, 0x7c9: 0x3c32, + 0x7cd: 0xa000, + 0x7e0: 0x2f7c, 0x7e1: 0xa000, 0x7e2: 0x3c40, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c39, 0x7ee: 0x2f77, 0x7ef: 0x2f81, + 0x7f0: 0x3c47, 0x7f1: 0x3c4e, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c55, 0x7f5: 0x3c5c, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c63, 0x7f9: 0x3c6a, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c71, 0x801: 0x3c78, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c8d, 0x805: 0x3c94, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c9b, 0x809: 0x3ca2, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3cb7, 0x82d: 0x3cbe, 0x82e: 0x3cc5, 0x82f: 0x3ccc, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1885, + 0x86a: 0x1888, 
0x86b: 0x188b, 0x86c: 0x188e, 0x86d: 0x1891, 0x86e: 0x1894, 0x86f: 0x1897, + 0x870: 0x189a, 0x871: 0x189d, 0x872: 0x18a0, 0x873: 0x18a9, 0x874: 0x1a6e, 0x875: 0x1a72, + 0x876: 0x1a76, 0x877: 0x1a7a, 0x878: 0x1a7e, 0x879: 0x1a82, 0x87a: 0x1a86, 0x87b: 0x1a8a, + 0x87c: 0x1a8e, 0x87d: 0x1c86, 0x87e: 0x1c8b, 0x87f: 0x1c90, + // Block 0x22, offset 0x880 + 0x880: 0x1c95, 0x881: 0x1c9a, 0x882: 0x1c9f, 0x883: 0x1ca4, 0x884: 0x1ca9, 0x885: 0x1cae, + 0x886: 0x1cb3, 0x887: 0x1cb8, 0x888: 0x1882, 0x889: 0x18a6, 0x88a: 0x18ca, 0x88b: 0x18ee, + 0x88c: 0x1912, 0x88d: 0x191b, 0x88e: 0x1921, 0x88f: 0x1927, 0x890: 0x192d, 0x891: 0x1b66, + 0x892: 0x1b6a, 0x893: 0x1b6e, 0x894: 0x1b72, 0x895: 0x1b76, 0x896: 0x1b7a, 0x897: 0x1b7e, + 0x898: 0x1b82, 0x899: 0x1b86, 0x89a: 0x1b8a, 0x89b: 0x1b8e, 0x89c: 0x1afa, 0x89d: 0x1afe, + 0x89e: 0x1b02, 0x89f: 0x1b06, 0x8a0: 0x1b0a, 0x8a1: 0x1b0e, 0x8a2: 0x1b12, 0x8a3: 0x1b16, + 0x8a4: 0x1b1a, 0x8a5: 0x1b1e, 0x8a6: 0x1b22, 0x8a7: 0x1b26, 0x8a8: 0x1b2a, 0x8a9: 0x1b2e, + 0x8aa: 0x1b32, 0x8ab: 0x1b36, 0x8ac: 0x1b3a, 0x8ad: 0x1b3e, 0x8ae: 0x1b42, 0x8af: 0x1b46, + 0x8b0: 0x1b4a, 0x8b1: 0x1b4e, 0x8b2: 0x1b52, 0x8b3: 0x1b56, 0x8b4: 0x1b5a, 0x8b5: 0x1b5e, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06c2, 0x8c1: 0x06e6, 0x8c2: 0x06f2, 0x8c3: 0x0702, 0x8c4: 0x070a, 0x8c5: 0x0716, + 0x8c6: 0x071e, 0x8c7: 0x0726, 0x8c8: 0x0732, 0x8c9: 0x0786, 0x8ca: 0x079e, 0x8cb: 0x07ae, + 0x8cc: 0x07be, 0x8cd: 0x07ce, 0x8ce: 0x07de, 0x8cf: 0x07fe, 0x8d0: 0x0802, 0x8d1: 0x0806, + 0x8d2: 0x083a, 0x8d3: 0x0862, 0x8d4: 0x0872, 0x8d5: 0x087a, 0x8d6: 0x087e, 0x8d7: 0x088a, + 0x8d8: 0x08a6, 0x8d9: 0x08aa, 0x8da: 0x08c2, 0x8db: 0x08c6, 0x8dc: 0x08ce, 0x8dd: 0x08de, + 0x8de: 0x097a, 0x8df: 0x098e, 0x8e0: 0x09ce, 0x8e1: 0x09e2, 0x8e2: 0x09ea, 0x8e3: 0x09ee, + 0x8e4: 0x09fe, 0x8e5: 0x0a1a, 0x8e6: 0x0a46, 0x8e7: 0x0a52, 0x8e8: 0x0a72, 0x8e9: 0x0a7e, + 0x8ea: 0x0a82, 0x8eb: 0x0a86, 0x8ec: 0x0a9e, 0x8ed: 0x0aa2, 0x8ee: 0x0ace, 0x8ef: 0x0ada, + 0x8f0: 0x0ae2, 0x8f1: 0x0aea, 0x8f2: 0x0afa, 0x8f3: 0x0b02, 0x8f4: 0x0b0a, 0x8f5: 0x0b36, + 0x8f6: 0x0b3a, 0x8f7: 0x0b42, 0x8f8: 0x0b46, 0x8f9: 0x0b4e, 0x8fa: 0x0b56, 0x8fb: 0x0b66, + 0x8fc: 0x0b82, 0x8fd: 0x0bfa, 0x8fe: 0x0c0e, 0x8ff: 0x0c12, + // Block 0x24, offset 0x900 + 0x900: 0x0c92, 0x901: 0x0c96, 0x902: 0x0caa, 0x903: 0x0cae, 0x904: 0x0cb6, 0x905: 0x0cbe, + 0x906: 0x0cc6, 0x907: 0x0cd2, 0x908: 0x0cfa, 0x909: 0x0d0a, 0x90a: 0x0d1e, 0x90b: 0x0d8e, + 0x90c: 0x0d9a, 0x90d: 0x0daa, 0x90e: 0x0db6, 0x90f: 0x0dc2, 0x910: 0x0dca, 0x911: 0x0dce, + 0x912: 0x0dd2, 0x913: 0x0dd6, 0x914: 0x0dda, 0x915: 0x0e92, 0x916: 0x0eda, 0x917: 0x0ee6, + 0x918: 0x0eea, 0x919: 0x0eee, 0x91a: 0x0ef2, 0x91b: 0x0efa, 0x91c: 0x0efe, 0x91d: 0x0f12, + 0x91e: 0x0f2e, 0x91f: 0x0f36, 0x920: 0x0f76, 0x921: 0x0f7a, 0x922: 0x0f82, 0x923: 0x0f86, + 0x924: 0x0f8e, 0x925: 0x0f92, 0x926: 0x0fb6, 0x927: 0x0fba, 0x928: 0x0fd6, 0x929: 0x0fda, + 0x92a: 0x0fde, 0x92b: 0x0fe2, 0x92c: 0x0ff6, 0x92d: 0x101a, 0x92e: 0x101e, 0x92f: 0x1022, + 0x930: 0x1046, 0x931: 0x1086, 0x932: 0x108a, 0x933: 0x10aa, 0x934: 0x10ba, 0x935: 0x10c2, + 0x936: 0x10e2, 0x937: 0x1106, 0x938: 0x114a, 0x939: 0x1152, 0x93a: 0x1166, 0x93b: 0x1172, + 0x93c: 0x117a, 0x93d: 0x1182, 0x93e: 0x1186, 0x93f: 0x118a, + // Block 0x25, offset 0x940 + 0x940: 0x11a2, 0x941: 0x11a6, 0x942: 0x11c2, 0x943: 0x11ca, 0x944: 0x11d2, 0x945: 0x11d6, + 0x946: 0x11e2, 0x947: 0x11ea, 0x948: 0x11ee, 0x949: 0x11f2, 0x94a: 
0x11fa, 0x94b: 0x11fe, + 0x94c: 0x129e, 0x94d: 0x12b2, 0x94e: 0x12e6, 0x94f: 0x12ea, 0x950: 0x12f2, 0x951: 0x131e, + 0x952: 0x1326, 0x953: 0x132e, 0x954: 0x1336, 0x955: 0x1372, 0x956: 0x1376, 0x957: 0x137e, + 0x958: 0x1382, 0x959: 0x1386, 0x95a: 0x13b2, 0x95b: 0x13b6, 0x95c: 0x13be, 0x95d: 0x13d2, + 0x95e: 0x13d6, 0x95f: 0x13f2, 0x960: 0x13fa, 0x961: 0x13fe, 0x962: 0x1422, 0x963: 0x1442, + 0x964: 0x1456, 0x965: 0x145a, 0x966: 0x1462, 0x967: 0x148e, 0x968: 0x1492, 0x969: 0x14a2, + 0x96a: 0x14c6, 0x96b: 0x14d2, 0x96c: 0x14e2, 0x96d: 0x14fa, 0x96e: 0x1502, 0x96f: 0x1506, + 0x970: 0x150a, 0x971: 0x150e, 0x972: 0x151a, 0x973: 0x151e, 0x974: 0x1526, 0x975: 0x1542, + 0x976: 0x1546, 0x977: 0x154a, 0x978: 0x1562, 0x979: 0x1566, 0x97a: 0x156e, 0x97b: 0x1582, + 0x97c: 0x1586, 0x97d: 0x158a, 0x97e: 0x1592, 0x97f: 0x1596, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f1f, 0x98d: 0xa000, 0x98e: 0x3f27, 0x98f: 0xa000, 0x990: 0x3f2f, 0x991: 0xa000, + 0x992: 0x3f37, 0x993: 0xa000, 0x994: 0x3f3f, 0x995: 0xa000, 0x996: 0x3f47, 0x997: 0xa000, + 0x998: 0x3f4f, 0x999: 0xa000, 0x99a: 0x3f57, 0x99b: 0xa000, 0x99c: 0x3f5f, 0x99d: 0xa000, + 0x99e: 0x3f67, 0x99f: 0xa000, 0x9a0: 0x3f6f, 0x9a1: 0xa000, 0x9a2: 0x3f77, + 0x9a4: 0xa000, 0x9a5: 0x3f7f, 0x9a6: 0xa000, 0x9a7: 0x3f87, 0x9a8: 0xa000, 0x9a9: 0x3f8f, + 0x9af: 0xa000, + 0x9b0: 0x3f97, 0x9b1: 0x3f9f, 0x9b2: 0xa000, 0x9b3: 0x3fa7, 0x9b4: 0x3faf, 0x9b5: 0xa000, + 0x9b6: 0x3fb7, 0x9b7: 0x3fbf, 0x9b8: 0xa000, 0x9b9: 0x3fc7, 0x9ba: 0x3fcf, 0x9bb: 0xa000, + 0x9bc: 0x3fd7, 0x9bd: 0x3fdf, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f17, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x42f3, 0x9dc: 0x42f9, 0x9dd: 0xa000, + 0x9de: 0x3fe7, 0x9df: 0x26ba, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3ff7, 0x9ed: 0xa000, 0x9ee: 0x3fff, 0x9ef: 0xa000, + 0x9f0: 0x4007, 0x9f1: 0xa000, 0x9f2: 0x400f, 0x9f3: 0xa000, 0x9f4: 0x4017, 0x9f5: 0xa000, + 0x9f6: 0x401f, 0x9f7: 0xa000, 0x9f8: 0x4027, 0x9f9: 0xa000, 0x9fa: 0x402f, 0x9fb: 0xa000, + 0x9fc: 0x4037, 0x9fd: 0xa000, 0x9fe: 0x403f, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4047, 0xa01: 0xa000, 0xa02: 0x404f, 0xa04: 0xa000, 0xa05: 0x4057, + 0xa06: 0xa000, 0xa07: 0x405f, 0xa08: 0xa000, 0xa09: 0x4067, + 0xa0f: 0xa000, 0xa10: 0x406f, 0xa11: 0x4077, + 0xa12: 0xa000, 0xa13: 0x407f, 0xa14: 0x4087, 0xa15: 0xa000, 0xa16: 0x408f, 0xa17: 0x4097, + 0xa18: 0xa000, 0xa19: 0x409f, 0xa1a: 0x40a7, 0xa1b: 0xa000, 0xa1c: 0x40af, 0xa1d: 0x40b7, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fef, + 0xa37: 0x40bf, 0xa38: 0x40c7, 0xa39: 0x40cf, 0xa3a: 0x40d7, + 0xa3d: 0xa000, 0xa3e: 0x40df, 0xa3f: 0x26cf, + // Block 0x29, offset 0xa40 + 0xa40: 0x036a, 0xa41: 0x032e, 0xa42: 0x0332, 0xa43: 0x0336, 0xa44: 0x037e, 0xa45: 0x033a, + 0xa46: 0x033e, 0xa47: 0x0342, 0xa48: 0x0346, 0xa49: 0x034a, 0xa4a: 0x034e, 0xa4b: 0x0352, + 0xa4c: 0x0356, 0xa4d: 0x035a, 0xa4e: 0x035e, 0xa4f: 0x49d4, 0xa50: 0x49da, 0xa51: 0x49e0, + 0xa52: 0x49e6, 0xa53: 0x49ec, 0xa54: 0x49f2, 0xa55: 0x49f8, 0xa56: 0x49fe, 0xa57: 0x4a04, + 0xa58: 0x4a0a, 0xa59: 0x4a10, 0xa5a: 0x4a16, 0xa5b: 0x4a1c, 0xa5c: 0x4a22, 0xa5d: 0x4a28, + 0xa5e: 0x4a2e, 0xa5f: 0x4a34, 0xa60: 0x4a3a, 0xa61: 0x4a40, 0xa62: 0x4a46, 0xa63: 0x4a4c, + 0xa64: 0x03c6, 0xa65: 0x0362, 0xa66: 0x0366, 0xa67: 0x03ea, 0xa68: 0x03ee, 0xa69: 0x03f2, + 0xa6a: 0x03f6, 0xa6b: 0x03fa, 0xa6c: 0x03fe, 0xa6d: 0x0402, 0xa6e: 0x036e, 0xa6f: 0x0406, + 0xa70: 0x040a, 0xa71: 0x0372, 0xa72: 0x0376, 0xa73: 0x037a, 0xa74: 0x0382, 0xa75: 0x0386, + 0xa76: 0x038a, 0xa77: 0x038e, 0xa78: 0x0392, 
0xa79: 0x0396, 0xa7a: 0x039a, 0xa7b: 0x039e, + 0xa7c: 0x03a2, 0xa7d: 0x03a6, 0xa7e: 0x03aa, 0xa7f: 0x03ae, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03b2, 0xa81: 0x03b6, 0xa82: 0x040e, 0xa83: 0x0412, 0xa84: 0x03ba, 0xa85: 0x03be, + 0xa86: 0x03c2, 0xa87: 0x03ca, 0xa88: 0x03ce, 0xa89: 0x03d2, 0xa8a: 0x03d6, 0xa8b: 0x03da, + 0xa8c: 0x03de, 0xa8d: 0x03e2, 0xa8e: 0x03e6, + 0xa92: 0x06c2, 0xa93: 0x071e, 0xa94: 0x06ce, 0xa95: 0x097e, 0xa96: 0x06d2, 0xa97: 0x06ea, + 0xa98: 0x06d6, 0xa99: 0x0f96, 0xa9a: 0x070a, 0xa9b: 0x06de, 0xa9c: 0x06c6, 0xa9d: 0x0a02, + 0xa9e: 0x0992, 0xa9f: 0x0732, + // Block 0x2b, offset 0xac0 + 0xac0: 0x205a, 0xac1: 0x2060, 0xac2: 0x2066, 0xac3: 0x206c, 0xac4: 0x2072, 0xac5: 0x2078, + 0xac6: 0x207e, 0xac7: 0x2084, 0xac8: 0x208a, 0xac9: 0x2090, 0xaca: 0x2096, 0xacb: 0x209c, + 0xacc: 0x20a2, 0xacd: 0x20a8, 0xace: 0x2733, 0xacf: 0x273c, 0xad0: 0x2745, 0xad1: 0x274e, + 0xad2: 0x2757, 0xad3: 0x2760, 0xad4: 0x2769, 0xad5: 0x2772, 0xad6: 0x277b, 0xad7: 0x278d, + 0xad8: 0x2796, 0xad9: 0x279f, 0xada: 0x27a8, 0xadb: 0x27b1, 0xadc: 0x2784, 0xadd: 0x2bb9, + 0xade: 0x2afa, 0xae0: 0x20ae, 0xae1: 0x20c6, 0xae2: 0x20ba, 0xae3: 0x210e, + 0xae4: 0x20cc, 0xae5: 0x20ea, 0xae6: 0x20b4, 0xae7: 0x20e4, 0xae8: 0x20c0, 0xae9: 0x20f6, + 0xaea: 0x2126, 0xaeb: 0x2144, 0xaec: 0x213e, 0xaed: 0x2132, 0xaee: 0x2180, 0xaef: 0x2114, + 0xaf0: 0x2120, 0xaf1: 0x2138, 0xaf2: 0x212c, 0xaf3: 0x2156, 0xaf4: 0x2102, 0xaf5: 0x214a, + 0xaf6: 0x2174, 0xaf7: 0x215c, 0xaf8: 0x20f0, 0xaf9: 0x20d2, 0xafa: 0x2108, 0xafb: 0x211a, + 0xafc: 0x2150, 0xafd: 0x20d8, 0xafe: 0x217a, 0xaff: 0x20fc, + // Block 0x2c, offset 0xb00 + 0xb00: 0x2162, 0xb01: 0x20de, 0xb02: 0x2168, 0xb03: 0x216e, 0xb04: 0x0932, 0xb05: 0x0b06, + 0xb06: 0x0caa, 0xb07: 0x10ca, + 0xb10: 0x1bca, 0xb11: 0x18ac, + 0xb12: 0x18af, 0xb13: 0x18b2, 0xb14: 0x18b5, 0xb15: 0x18b8, 0xb16: 0x18bb, 0xb17: 0x18be, + 0xb18: 0x18c1, 0xb19: 0x18c4, 0xb1a: 0x18cd, 0xb1b: 0x18d0, 0xb1c: 0x18d3, 0xb1d: 0x18d6, + 0xb1e: 0x18d9, 0xb1f: 0x18dc, 0xb20: 0x0316, 0xb21: 0x031e, 0xb22: 0x0322, 0xb23: 0x032a, + 0xb24: 0x032e, 0xb25: 0x0332, 0xb26: 0x033a, 0xb27: 0x0342, 0xb28: 0x0346, 0xb29: 0x034e, + 0xb2a: 0x0352, 0xb2b: 0x0356, 0xb2c: 0x035a, 0xb2d: 0x035e, 0xb2e: 0x2e2f, 0xb2f: 0x2e37, + 0xb30: 0x2e3f, 0xb31: 0x2e47, 0xb32: 0x2e4f, 0xb33: 0x2e57, 0xb34: 0x2e5f, 0xb35: 0x2e67, + 0xb36: 0x2e77, 0xb37: 0x2e7f, 0xb38: 0x2e87, 0xb39: 0x2e8f, 0xb3a: 0x2e97, 0xb3b: 0x2e9f, + 0xb3c: 0x2eea, 0xb3d: 0x2eb2, 0xb3e: 0x2e6f, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06c2, 0xb41: 0x071e, 0xb42: 0x06ce, 0xb43: 0x097e, 0xb44: 0x0722, 0xb45: 0x07b2, + 0xb46: 0x06ca, 0xb47: 0x07ae, 0xb48: 0x070e, 0xb49: 0x088a, 0xb4a: 0x0d0a, 0xb4b: 0x0e92, + 0xb4c: 0x0dda, 0xb4d: 0x0d1e, 0xb4e: 0x1462, 0xb4f: 0x098e, 0xb50: 0x0cd2, 0xb51: 0x0d4e, + 0xb52: 0x0d0e, 0xb53: 0x104e, 0xb54: 0x08fe, 0xb55: 0x0f06, 0xb56: 0x138a, 0xb57: 0x1062, + 0xb58: 0x0846, 0xb59: 0x1092, 0xb5a: 0x0f9e, 0xb5b: 0x0a1a, 0xb5c: 0x1412, 0xb5d: 0x0782, + 0xb5e: 0x08ae, 0xb5f: 0x0dfa, 0xb60: 0x152a, 0xb61: 0x0746, 0xb62: 0x07d6, 0xb63: 0x0d9e, + 0xb64: 0x06d2, 0xb65: 0x06ea, 0xb66: 0x06d6, 0xb67: 0x0ade, 0xb68: 0x08f2, 0xb69: 0x0882, + 0xb6a: 0x0a5a, 0xb6b: 0x0a4e, 0xb6c: 0x0fee, 0xb6d: 0x0742, 0xb6e: 0x139e, 0xb6f: 0x089e, + 0xb70: 0x09f6, 0xb71: 0x18df, 0xb72: 0x18e2, 0xb73: 0x18e5, 0xb74: 0x18e8, 0xb75: 0x18f1, + 0xb76: 0x18f4, 0xb77: 0x18f7, 0xb78: 0x18fa, 0xb79: 0x18fd, 0xb7a: 0x1900, 0xb7b: 0x1903, + 0xb7c: 0x1906, 0xb7d: 0x1909, 0xb7e: 0x190c, 0xb7f: 0x1915, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1ccc, 0xb81: 0x1cdb, 0xb82: 0x1cea, 0xb83: 
0x1cf9, 0xb84: 0x1d08, 0xb85: 0x1d17, + 0xb86: 0x1d26, 0xb87: 0x1d35, 0xb88: 0x1d44, 0xb89: 0x2192, 0xb8a: 0x21a4, 0xb8b: 0x21b6, + 0xb8c: 0x1957, 0xb8d: 0x1c0a, 0xb8e: 0x19d8, 0xb8f: 0x1bae, 0xb90: 0x04ce, 0xb91: 0x04d6, + 0xb92: 0x04de, 0xb93: 0x04e6, 0xb94: 0x04ee, 0xb95: 0x04f2, 0xb96: 0x04f6, 0xb97: 0x04fa, + 0xb98: 0x04fe, 0xb99: 0x0502, 0xb9a: 0x0506, 0xb9b: 0x050a, 0xb9c: 0x050e, 0xb9d: 0x0512, + 0xb9e: 0x0516, 0xb9f: 0x051a, 0xba0: 0x051e, 0xba1: 0x0526, 0xba2: 0x052a, 0xba3: 0x052e, + 0xba4: 0x0532, 0xba5: 0x0536, 0xba6: 0x053a, 0xba7: 0x053e, 0xba8: 0x0542, 0xba9: 0x0546, + 0xbaa: 0x054a, 0xbab: 0x054e, 0xbac: 0x0552, 0xbad: 0x0556, 0xbae: 0x055a, 0xbaf: 0x055e, + 0xbb0: 0x0562, 0xbb1: 0x0566, 0xbb2: 0x056a, 0xbb3: 0x0572, 0xbb4: 0x057a, 0xbb5: 0x0582, + 0xbb6: 0x0586, 0xbb7: 0x058a, 0xbb8: 0x058e, 0xbb9: 0x0592, 0xbba: 0x0596, 0xbbb: 0x059a, + 0xbbc: 0x059e, 0xbbd: 0x05a2, 0xbbe: 0x05a6, 0xbbf: 0x2700, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b19, 0xbc1: 0x29b5, 0xbc2: 0x2b29, 0xbc3: 0x288d, 0xbc4: 0x2efb, 0xbc5: 0x2897, + 0xbc6: 0x28a1, 0xbc7: 0x2f3f, 0xbc8: 0x29c2, 0xbc9: 0x28ab, 0xbca: 0x28b5, 0xbcb: 0x28bf, + 0xbcc: 0x29e9, 0xbcd: 0x29f6, 0xbce: 0x29cf, 0xbcf: 0x29dc, 0xbd0: 0x2ec0, 0xbd1: 0x2a03, + 0xbd2: 0x2a10, 0xbd3: 0x2bcb, 0xbd4: 0x26c1, 0xbd5: 0x2bde, 0xbd6: 0x2bf1, 0xbd7: 0x2b39, + 0xbd8: 0x2a1d, 0xbd9: 0x2c04, 0xbda: 0x2c17, 0xbdb: 0x2a2a, 0xbdc: 0x28c9, 0xbdd: 0x28d3, + 0xbde: 0x2ece, 0xbdf: 0x2a37, 0xbe0: 0x2b49, 0xbe1: 0x2f0c, 0xbe2: 0x28dd, 0xbe3: 0x28e7, + 0xbe4: 0x2a44, 0xbe5: 0x28f1, 0xbe6: 0x28fb, 0xbe7: 0x26d6, 0xbe8: 0x26dd, 0xbe9: 0x2905, + 0xbea: 0x290f, 0xbeb: 0x2c2a, 0xbec: 0x2a51, 0xbed: 0x2b59, 0xbee: 0x2c3d, 0xbef: 0x2a5e, + 0xbf0: 0x2923, 0xbf1: 0x2919, 0xbf2: 0x2f53, 0xbf3: 0x2a6b, 0xbf4: 0x2c50, 0xbf5: 0x292d, + 0xbf6: 0x2b69, 0xbf7: 0x2937, 0xbf8: 0x2a85, 0xbf9: 0x2941, 0xbfa: 0x2a92, 0xbfb: 0x2f1d, + 0xbfc: 0x2a78, 0xbfd: 0x2b79, 0xbfe: 0x2a9f, 0xbff: 0x26e4, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f2e, 0xc01: 0x294b, 0xc02: 0x2955, 0xc03: 0x2aac, 0xc04: 0x295f, 0xc05: 0x2969, + 0xc06: 0x2973, 0xc07: 0x2b89, 0xc08: 0x2ab9, 0xc09: 0x26eb, 0xc0a: 0x2c63, 0xc0b: 0x2ea7, + 0xc0c: 0x2b99, 0xc0d: 0x2ac6, 0xc0e: 0x2edc, 0xc0f: 0x297d, 0xc10: 0x2987, 0xc11: 0x2ad3, + 0xc12: 0x26f2, 0xc13: 0x2ae0, 0xc14: 0x2ba9, 0xc15: 0x26f9, 0xc16: 0x2c76, 0xc17: 0x2991, + 0xc18: 0x1cbd, 0xc19: 0x1cd1, 0xc1a: 0x1ce0, 0xc1b: 0x1cef, 0xc1c: 0x1cfe, 0xc1d: 0x1d0d, + 0xc1e: 0x1d1c, 0xc1f: 0x1d2b, 0xc20: 0x1d3a, 0xc21: 0x1d49, 0xc22: 0x2198, 0xc23: 0x21aa, + 0xc24: 0x21bc, 0xc25: 0x21c8, 0xc26: 0x21d4, 0xc27: 0x21e0, 0xc28: 0x21ec, 0xc29: 0x21f8, + 0xc2a: 0x2204, 0xc2b: 0x2210, 0xc2c: 0x224c, 0xc2d: 0x2258, 0xc2e: 0x2264, 0xc2f: 0x2270, + 0xc30: 0x227c, 0xc31: 0x1c1a, 0xc32: 0x19cc, 0xc33: 0x1939, 0xc34: 0x1bea, 0xc35: 0x1a4d, + 0xc36: 0x1a5c, 0xc37: 0x19d2, 0xc38: 0x1c02, 0xc39: 0x1c06, 0xc3a: 0x1963, 0xc3b: 0x270e, + 0xc3c: 0x271c, 0xc3d: 0x2707, 0xc3e: 0x2715, 0xc3f: 0x2aed, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a50, 0xc41: 0x1a38, 0xc42: 0x1c66, 0xc43: 0x1a20, 0xc44: 0x19f9, 0xc45: 0x196c, + 0xc46: 0x197b, 0xc47: 0x194b, 0xc48: 0x1bf6, 0xc49: 0x1d58, 0xc4a: 0x1a53, 0xc4b: 0x1a3b, + 0xc4c: 0x1c6a, 0xc4d: 0x1c76, 0xc4e: 0x1a2c, 0xc4f: 0x1a02, 0xc50: 0x195a, 0xc51: 0x1c22, + 0xc52: 0x1bb6, 0xc53: 0x1ba2, 0xc54: 0x1bd2, 0xc55: 0x1c7a, 0xc56: 0x1a2f, 0xc57: 0x19cf, + 0xc58: 0x1a05, 0xc59: 0x19e4, 0xc5a: 0x1a47, 0xc5b: 0x1c7e, 0xc5c: 0x1a32, 0xc5d: 0x19c6, + 0xc5e: 0x1a08, 0xc5f: 0x1c42, 0xc60: 0x1bfa, 0xc61: 0x1a1a, 0xc62: 0x1c2a, 0xc63: 0x1c46, + 0xc64: 0x1bfe, 
0xc65: 0x1a1d, 0xc66: 0x1c2e, 0xc67: 0x22ee, 0xc68: 0x2302, 0xc69: 0x199c, + 0xc6a: 0x1c26, 0xc6b: 0x1bba, 0xc6c: 0x1ba6, 0xc6d: 0x1c4e, 0xc6e: 0x2723, 0xc6f: 0x27ba, + 0xc70: 0x1a5f, 0xc71: 0x1a4a, 0xc72: 0x1c82, 0xc73: 0x1a35, 0xc74: 0x1a56, 0xc75: 0x1a3e, + 0xc76: 0x1c6e, 0xc77: 0x1a23, 0xc78: 0x19fc, 0xc79: 0x1987, 0xc7a: 0x1a59, 0xc7b: 0x1a41, + 0xc7c: 0x1c72, 0xc7d: 0x1a26, 0xc7e: 0x19ff, 0xc7f: 0x198a, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c32, 0xc81: 0x1bbe, 0xc82: 0x1d53, 0xc83: 0x193c, 0xc84: 0x19c0, 0xc85: 0x19c3, + 0xc86: 0x22fb, 0xc87: 0x1b9a, 0xc88: 0x19c9, 0xc89: 0x194e, 0xc8a: 0x19e7, 0xc8b: 0x1951, + 0xc8c: 0x19f0, 0xc8d: 0x196f, 0xc8e: 0x1972, 0xc8f: 0x1a0b, 0xc90: 0x1a11, 0xc91: 0x1a14, + 0xc92: 0x1c36, 0xc93: 0x1a17, 0xc94: 0x1a29, 0xc95: 0x1c3e, 0xc96: 0x1c4a, 0xc97: 0x1996, + 0xc98: 0x1d5d, 0xc99: 0x1bc2, 0xc9a: 0x1999, 0xc9b: 0x1a62, 0xc9c: 0x19ab, 0xc9d: 0x19ba, + 0xc9e: 0x22e8, 0xc9f: 0x22e2, 0xca0: 0x1cc7, 0xca1: 0x1cd6, 0xca2: 0x1ce5, 0xca3: 0x1cf4, + 0xca4: 0x1d03, 0xca5: 0x1d12, 0xca6: 0x1d21, 0xca7: 0x1d30, 0xca8: 0x1d3f, 0xca9: 0x218c, + 0xcaa: 0x219e, 0xcab: 0x21b0, 0xcac: 0x21c2, 0xcad: 0x21ce, 0xcae: 0x21da, 0xcaf: 0x21e6, + 0xcb0: 0x21f2, 0xcb1: 0x21fe, 0xcb2: 0x220a, 0xcb3: 0x2246, 0xcb4: 0x2252, 0xcb5: 0x225e, + 0xcb6: 0x226a, 0xcb7: 0x2276, 0xcb8: 0x2282, 0xcb9: 0x2288, 0xcba: 0x228e, 0xcbb: 0x2294, + 0xcbc: 0x229a, 0xcbd: 0x22ac, 0xcbe: 0x22b2, 0xcbf: 0x1c16, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x137a, 0xcc1: 0x0cfe, 0xcc2: 0x13d6, 0xcc3: 0x13a2, 0xcc4: 0x0e5a, 0xcc5: 0x06ee, + 0xcc6: 0x08e2, 0xcc7: 0x162e, 0xcc8: 0x162e, 0xcc9: 0x0a0e, 0xcca: 0x1462, 0xccb: 0x0946, + 0xccc: 0x0a0a, 0xccd: 0x0bf2, 0xcce: 0x0fd2, 0xccf: 0x1162, 0xcd0: 0x129a, 0xcd1: 0x12d6, + 0xcd2: 0x130a, 0xcd3: 0x141e, 0xcd4: 0x0d76, 0xcd5: 0x0e02, 0xcd6: 0x0eae, 0xcd7: 0x0f46, + 0xcd8: 0x1262, 0xcd9: 0x144a, 0xcda: 0x1576, 0xcdb: 0x0712, 0xcdc: 0x08b6, 0xcdd: 0x0d8a, + 0xcde: 0x0ed2, 0xcdf: 0x1296, 0xce0: 0x15c6, 0xce1: 0x0ab6, 0xce2: 0x0e7a, 0xce3: 0x1286, + 0xce4: 0x131a, 0xce5: 0x0c26, 0xce6: 0x11be, 0xce7: 0x12e2, 0xce8: 0x0b22, 0xce9: 0x0d12, + 0xcea: 0x0e1a, 0xceb: 0x0f1e, 0xcec: 0x142a, 0xced: 0x0752, 0xcee: 0x07ea, 0xcef: 0x0856, + 0xcf0: 0x0c8e, 0xcf1: 0x0d82, 0xcf2: 0x0ece, 0xcf3: 0x0ff2, 0xcf4: 0x117a, 0xcf5: 0x128e, + 0xcf6: 0x12a6, 0xcf7: 0x13ca, 0xcf8: 0x14f2, 0xcf9: 0x15a6, 0xcfa: 0x15c2, 0xcfb: 0x102e, + 0xcfc: 0x106e, 0xcfd: 0x1126, 0xcfe: 0x1246, 0xcff: 0x147e, + // Block 0x34, offset 0xd00 + 0xd00: 0x15ce, 0xd01: 0x134e, 0xd02: 0x09ca, 0xd03: 0x0b3e, 0xd04: 0x10de, 0xd05: 0x119e, + 0xd06: 0x0f02, 0xd07: 0x1036, 0xd08: 0x139a, 0xd09: 0x14ea, 0xd0a: 0x09c6, 0xd0b: 0x0a92, + 0xd0c: 0x0d7a, 0xd0d: 0x0e2e, 0xd0e: 0x0e62, 0xd0f: 0x1116, 0xd10: 0x113e, 0xd11: 0x14aa, + 0xd12: 0x0852, 0xd13: 0x11aa, 0xd14: 0x07f6, 0xd15: 0x07f2, 0xd16: 0x109a, 0xd17: 0x112a, + 0xd18: 0x125e, 0xd19: 0x14b2, 0xd1a: 0x136a, 0xd1b: 0x0c2a, 0xd1c: 0x0d76, 0xd1d: 0x135a, + 0xd1e: 0x06fa, 0xd1f: 0x0a66, 0xd20: 0x0b96, 0xd21: 0x0f32, 0xd22: 0x0fb2, 0xd23: 0x0876, + 0xd24: 0x103e, 0xd25: 0x0762, 0xd26: 0x0b7a, 0xd27: 0x06da, 0xd28: 0x0dee, 0xd29: 0x0ca6, + 0xd2a: 0x1112, 0xd2b: 0x08ca, 0xd2c: 0x09b6, 0xd2d: 0x0ffe, 0xd2e: 0x1266, 0xd2f: 0x133e, + 0xd30: 0x0dba, 0xd31: 0x13fa, 0xd32: 0x0de6, 0xd33: 0x0c3a, 0xd34: 0x121e, 0xd35: 0x0c5a, + 0xd36: 0x0fae, 0xd37: 0x072e, 0xd38: 0x07aa, 0xd39: 0x07ee, 0xd3a: 0x0d56, 0xd3b: 0x10fe, + 0xd3c: 0x11f6, 0xd3d: 0x134a, 0xd3e: 0x145e, 0xd3f: 0x085e, + // Block 0x35, offset 0xd40 + 0xd40: 0x0912, 0xd41: 0x0a1a, 0xd42: 0x0b32, 0xd43: 0x0cc2, 0xd44: 
0x0e7e, 0xd45: 0x1042, + 0xd46: 0x149a, 0xd47: 0x157e, 0xd48: 0x15d2, 0xd49: 0x15ea, 0xd4a: 0x083a, 0xd4b: 0x0cf6, + 0xd4c: 0x0da6, 0xd4d: 0x13ee, 0xd4e: 0x0afe, 0xd4f: 0x0bda, 0xd50: 0x0bf6, 0xd51: 0x0c86, + 0xd52: 0x0e6e, 0xd53: 0x0eba, 0xd54: 0x0f6a, 0xd55: 0x108e, 0xd56: 0x1132, 0xd57: 0x1196, + 0xd58: 0x13de, 0xd59: 0x126e, 0xd5a: 0x1406, 0xd5b: 0x1482, 0xd5c: 0x0812, 0xd5d: 0x083e, + 0xd5e: 0x0926, 0xd5f: 0x0eaa, 0xd60: 0x12f6, 0xd61: 0x133e, 0xd62: 0x0b1e, 0xd63: 0x0b8e, + 0xd64: 0x0c52, 0xd65: 0x0db2, 0xd66: 0x10da, 0xd67: 0x0f26, 0xd68: 0x073e, 0xd69: 0x0982, + 0xd6a: 0x0a66, 0xd6b: 0x0aca, 0xd6c: 0x0b9a, 0xd6d: 0x0f42, 0xd6e: 0x0f5e, 0xd6f: 0x116e, + 0xd70: 0x118e, 0xd71: 0x1466, 0xd72: 0x14e6, 0xd73: 0x14f6, 0xd74: 0x1532, 0xd75: 0x0756, + 0xd76: 0x1082, 0xd77: 0x1452, 0xd78: 0x14ce, 0xd79: 0x0bb2, 0xd7a: 0x071a, 0xd7b: 0x077a, + 0xd7c: 0x0a6a, 0xd7d: 0x0a8a, 0xd7e: 0x0cb2, 0xd7f: 0x0d76, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec6, 0xd81: 0x0fce, 0xd82: 0x127a, 0xd83: 0x141a, 0xd84: 0x1626, 0xd85: 0x0ce6, + 0xd86: 0x14a6, 0xd87: 0x0836, 0xd88: 0x0d32, 0xd89: 0x0d3e, 0xd8a: 0x0e12, 0xd8b: 0x0e4a, + 0xd8c: 0x0f4e, 0xd8d: 0x0faa, 0xd8e: 0x102a, 0xd8f: 0x110e, 0xd90: 0x153e, 0xd91: 0x07b2, + 0xd92: 0x0c06, 0xd93: 0x14b6, 0xd94: 0x076a, 0xd95: 0x0aae, 0xd96: 0x0e32, 0xd97: 0x13e2, + 0xd98: 0x0b6a, 0xd99: 0x0bba, 0xd9a: 0x0d46, 0xd9b: 0x0f32, 0xd9c: 0x14be, 0xd9d: 0x081a, + 0xd9e: 0x0902, 0xd9f: 0x0a9a, 0xda0: 0x0cd6, 0xda1: 0x0d22, 0xda2: 0x0d62, 0xda3: 0x0df6, + 0xda4: 0x0f4a, 0xda5: 0x0fbe, 0xda6: 0x115a, 0xda7: 0x12fa, 0xda8: 0x1306, 0xda9: 0x145a, + 0xdaa: 0x14da, 0xdab: 0x0886, 0xdac: 0x0e4e, 0xdad: 0x0906, 0xdae: 0x0eca, 0xdaf: 0x0f6e, + 0xdb0: 0x128a, 0xdb1: 0x14c2, 0xdb2: 0x15ae, 0xdb3: 0x15d6, 0xdb4: 0x0d3a, 0xdb5: 0x0e2a, + 0xdb6: 0x11c6, 0xdb7: 0x10ba, 0xdb8: 0x10c6, 0xdb9: 0x10ea, 0xdba: 0x0f1a, 0xdbb: 0x0ea2, + 0xdbc: 0x1366, 0xdbd: 0x0736, 0xdbe: 0x122e, 0xdbf: 0x081e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080e, 0xdc1: 0x0b0e, 0xdc2: 0x0c2e, 0xdc3: 0x10f6, 0xdc4: 0x0a56, 0xdc5: 0x0e06, + 0xdc6: 0x0cf2, 0xdc7: 0x13ea, 0xdc8: 0x12ea, 0xdc9: 0x14ae, 0xdca: 0x1326, 0xdcb: 0x0b2a, + 0xdcc: 0x078a, 0xdcd: 0x095e, 0xdd0: 0x09b2, + 0xdd2: 0x0ce2, 0xdd5: 0x07fa, 0xdd6: 0x0f22, 0xdd7: 0x0fe6, + 0xdd8: 0x104a, 0xdd9: 0x1066, 0xdda: 0x106a, 0xddb: 0x107e, 0xddc: 0x14fe, 0xddd: 0x10ee, + 0xdde: 0x1172, 0xde0: 0x1292, 0xde2: 0x1356, + 0xde5: 0x140a, 0xde6: 0x1436, + 0xdea: 0x1552, 0xdeb: 0x1556, 0xdec: 0x155a, 0xded: 0x15be, 0xdee: 0x142e, 0xdef: 0x14ca, + 0xdf0: 0x075a, 0xdf1: 0x077e, 0xdf2: 0x0792, 0xdf3: 0x084e, 0xdf4: 0x085a, 0xdf5: 0x089a, + 0xdf6: 0x094e, 0xdf7: 0x096a, 0xdf8: 0x0972, 0xdf9: 0x09ae, 0xdfa: 0x09ba, 0xdfb: 0x0a96, + 0xdfc: 0x0a9e, 0xdfd: 0x0ba6, 0xdfe: 0x0bce, 0xdff: 0x0bd6, + // Block 0x38, offset 0xe00 + 0xe00: 0x0bee, 0xe01: 0x0c9a, 0xe02: 0x0cca, 0xe03: 0x0cea, 0xe04: 0x0d5a, 0xe05: 0x0e1e, + 0xe06: 0x0e3a, 0xe07: 0x0e6a, 0xe08: 0x0ebe, 0xe09: 0x0ede, 0xe0a: 0x0f52, 0xe0b: 0x1032, + 0xe0c: 0x104e, 0xe0d: 0x1056, 0xe0e: 0x1052, 0xe0f: 0x105a, 0xe10: 0x105e, 0xe11: 0x1062, + 0xe12: 0x1076, 0xe13: 0x107a, 0xe14: 0x109e, 0xe15: 0x10b2, 0xe16: 0x10ce, 0xe17: 0x1132, + 0xe18: 0x113a, 0xe19: 0x1142, 0xe1a: 0x1156, 0xe1b: 0x117e, 0xe1c: 0x11ce, 0xe1d: 0x1202, + 0xe1e: 0x1202, 0xe1f: 0x126a, 0xe20: 0x1312, 0xe21: 0x132a, 0xe22: 0x135e, 0xe23: 0x1362, + 0xe24: 0x13a6, 0xe25: 0x13aa, 0xe26: 0x1402, 0xe27: 0x140a, 0xe28: 0x14de, 0xe29: 0x1522, + 0xe2a: 0x153a, 0xe2b: 0x0b9e, 0xe2c: 0x1721, 0xe2d: 0x11e6, + 0xe30: 0x06e2, 0xe31: 0x07e6, 0xe32: 0x07a6, 0xe33: 0x074e, 
0xe34: 0x078e, 0xe35: 0x07ba, + 0xe36: 0x084a, 0xe37: 0x0866, 0xe38: 0x094e, 0xe39: 0x093a, 0xe3a: 0x094a, 0xe3b: 0x0966, + 0xe3c: 0x09b2, 0xe3d: 0x09c2, 0xe3e: 0x0a06, 0xe3f: 0x0a12, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2e, 0xe41: 0x0a3e, 0xe42: 0x0b26, 0xe43: 0x0b2e, 0xe44: 0x0b5e, 0xe45: 0x0b7e, + 0xe46: 0x0bae, 0xe47: 0x0bc6, 0xe48: 0x0bb6, 0xe49: 0x0bd6, 0xe4a: 0x0bca, 0xe4b: 0x0bee, + 0xe4c: 0x0c0a, 0xe4d: 0x0c62, 0xe4e: 0x0c6e, 0xe4f: 0x0c76, 0xe50: 0x0c9e, 0xe51: 0x0ce2, + 0xe52: 0x0d12, 0xe53: 0x0d16, 0xe54: 0x0d2a, 0xe55: 0x0daa, 0xe56: 0x0dba, 0xe57: 0x0e12, + 0xe58: 0x0e5e, 0xe59: 0x0e56, 0xe5a: 0x0e6a, 0xe5b: 0x0e86, 0xe5c: 0x0ebe, 0xe5d: 0x1016, + 0xe5e: 0x0ee2, 0xe5f: 0x0f16, 0xe60: 0x0f22, 0xe61: 0x0f62, 0xe62: 0x0f7e, 0xe63: 0x0fa2, + 0xe64: 0x0fc6, 0xe65: 0x0fca, 0xe66: 0x0fe6, 0xe67: 0x0fea, 0xe68: 0x0ffa, 0xe69: 0x100e, + 0xe6a: 0x100a, 0xe6b: 0x103a, 0xe6c: 0x10b6, 0xe6d: 0x10ce, 0xe6e: 0x10e6, 0xe6f: 0x111e, + 0xe70: 0x1132, 0xe71: 0x114e, 0xe72: 0x117e, 0xe73: 0x1232, 0xe74: 0x125a, 0xe75: 0x12ce, + 0xe76: 0x1316, 0xe77: 0x1322, 0xe78: 0x132a, 0xe79: 0x1342, 0xe7a: 0x1356, 0xe7b: 0x1346, + 0xe7c: 0x135e, 0xe7d: 0x135a, 0xe7e: 0x1352, 0xe7f: 0x1362, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136e, 0xe81: 0x13aa, 0xe82: 0x13e6, 0xe83: 0x1416, 0xe84: 0x144e, 0xe85: 0x146e, + 0xe86: 0x14ba, 0xe87: 0x14de, 0xe88: 0x14fe, 0xe89: 0x1512, 0xe8a: 0x1522, 0xe8b: 0x152e, + 0xe8c: 0x153a, 0xe8d: 0x158e, 0xe8e: 0x162e, 0xe8f: 0x16b8, 0xe90: 0x16b3, 0xe91: 0x16e5, + 0xe92: 0x060a, 0xe93: 0x0632, 0xe94: 0x0636, 0xe95: 0x1767, 0xe96: 0x1794, 0xe97: 0x180c, + 0xe98: 0x161a, 0xe99: 0x162a, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19db, 0xec1: 0x19de, 0xec2: 0x19e1, 0xec3: 0x1c0e, 0xec4: 0x1c12, 0xec5: 0x1a65, + 0xec6: 0x1a65, + 0xed3: 0x1d7b, 0xed4: 0x1d6c, 0xed5: 0x1d71, 0xed6: 0x1d80, 0xed7: 0x1d76, + 0xedd: 0x43a7, + 0xede: 0x8116, 0xedf: 0x4419, 0xee0: 0x0230, 0xee1: 0x0218, 0xee2: 0x0221, 0xee3: 0x0224, + 0xee4: 0x0227, 0xee5: 0x022a, 0xee6: 0x022d, 0xee7: 0x0233, 0xee8: 0x0236, 0xee9: 0x0017, + 0xeea: 0x4407, 0xeeb: 0x440d, 0xeec: 0x450b, 0xeed: 0x4513, 0xeee: 0x435f, 0xeef: 0x4365, + 0xef0: 0x436b, 0xef1: 0x4371, 0xef2: 0x437d, 0xef3: 0x4383, 0xef4: 0x4389, 0xef5: 0x4395, + 0xef6: 0x439b, 0xef8: 0x43a1, 0xef9: 0x43ad, 0xefa: 0x43b3, 0xefb: 0x43b9, + 0xefc: 0x43c5, 0xefe: 0x43cb, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43d1, 0xf01: 0x43d7, 0xf03: 0x43dd, 0xf04: 0x43e3, + 0xf06: 0x43ef, 0xf07: 0x43f5, 0xf08: 0x43fb, 0xf09: 0x4401, 0xf0a: 0x4413, 0xf0b: 0x438f, + 0xf0c: 0x4377, 0xf0d: 0x43bf, 0xf0e: 0x43e9, 0xf0f: 0x1d85, 0xf10: 0x029c, 0xf11: 0x029c, + 0xf12: 0x02a5, 0xf13: 0x02a5, 0xf14: 0x02a5, 0xf15: 0x02a5, 0xf16: 0x02a8, 0xf17: 0x02a8, + 0xf18: 0x02a8, 0xf19: 0x02a8, 0xf1a: 0x02ae, 0xf1b: 0x02ae, 0xf1c: 0x02ae, 0xf1d: 0x02ae, + 0xf1e: 0x02a2, 0xf1f: 0x02a2, 0xf20: 0x02a2, 0xf21: 0x02a2, 0xf22: 0x02ab, 0xf23: 0x02ab, + 0xf24: 0x02ab, 0xf25: 0x02ab, 0xf26: 0x029f, 0xf27: 0x029f, 0xf28: 0x029f, 0xf29: 0x029f, + 0xf2a: 0x02d2, 0xf2b: 0x02d2, 0xf2c: 0x02d2, 0xf2d: 0x02d2, 0xf2e: 0x02d5, 0xf2f: 0x02d5, + 0xf30: 0x02d5, 0xf31: 0x02d5, 0xf32: 0x02b4, 0xf33: 0x02b4, 0xf34: 0x02b4, 0xf35: 0x02b4, + 0xf36: 0x02b1, 0xf37: 0x02b1, 0xf38: 0x02b1, 0xf39: 0x02b1, 0xf3a: 0x02b7, 0xf3b: 0x02b7, + 0xf3c: 0x02b7, 0xf3d: 0x02b7, 0xf3e: 0x02ba, 0xf3f: 0x02ba, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02ba, 0xf41: 0x02ba, 0xf42: 0x02c3, 0xf43: 0x02c3, 0xf44: 0x02c0, 0xf45: 0x02c0, + 0xf46: 0x02c6, 0xf47: 0x02c6, 0xf48: 0x02bd, 0xf49: 0x02bd, 0xf4a: 0x02cc, 0xf4b: 0x02cc, + 0xf4c: 0x02c9, 
0xf4d: 0x02c9, 0xf4e: 0x02d8, 0xf4f: 0x02d8, 0xf50: 0x02d8, 0xf51: 0x02d8, + 0xf52: 0x02de, 0xf53: 0x02de, 0xf54: 0x02de, 0xf55: 0x02de, 0xf56: 0x02e4, 0xf57: 0x02e4, + 0xf58: 0x02e4, 0xf59: 0x02e4, 0xf5a: 0x02e1, 0xf5b: 0x02e1, 0xf5c: 0x02e1, 0xf5d: 0x02e1, + 0xf5e: 0x02e7, 0xf5f: 0x02e7, 0xf60: 0x02ea, 0xf61: 0x02ea, 0xf62: 0x02ea, 0xf63: 0x02ea, + 0xf64: 0x4485, 0xf65: 0x4485, 0xf66: 0x02f0, 0xf67: 0x02f0, 0xf68: 0x02f0, 0xf69: 0x02f0, + 0xf6a: 0x02ed, 0xf6b: 0x02ed, 0xf6c: 0x02ed, 0xf6d: 0x02ed, 0xf6e: 0x030b, 0xf6f: 0x030b, + 0xf70: 0x447f, 0xf71: 0x447f, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02db, 0xf94: 0x02db, 0xf95: 0x02db, 0xf96: 0x02db, 0xf97: 0x02f9, + 0xf98: 0x02f9, 0xf99: 0x02f6, 0xf9a: 0x02f6, 0xf9b: 0x02fc, 0xf9c: 0x02fc, 0xf9d: 0x2055, + 0xf9e: 0x0302, 0xf9f: 0x0302, 0xfa0: 0x02f3, 0xfa1: 0x02f3, 0xfa2: 0x02ff, 0xfa3: 0x02ff, + 0xfa4: 0x0308, 0xfa5: 0x0308, 0xfa6: 0x0308, 0xfa7: 0x0308, 0xfa8: 0x0290, 0xfa9: 0x0290, + 0xfaa: 0x25b0, 0xfab: 0x25b0, 0xfac: 0x2620, 0xfad: 0x2620, 0xfae: 0x25ef, 0xfaf: 0x25ef, + 0xfb0: 0x260b, 0xfb1: 0x260b, 0xfb2: 0x2604, 0xfb3: 0x2604, 0xfb4: 0x2612, 0xfb5: 0x2612, + 0xfb6: 0x2619, 0xfb7: 0x2619, 0xfb8: 0x2619, 0xfb9: 0x25f6, 0xfba: 0x25f6, 0xfbb: 0x25f6, + 0xfbc: 0x0305, 0xfbd: 0x0305, 0xfbe: 0x0305, 0xfbf: 0x0305, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b7, 0xfc1: 0x25be, 0xfc2: 0x25da, 0xfc3: 0x25f6, 0xfc4: 0x25fd, 0xfc5: 0x1d8f, + 0xfc6: 0x1d94, 0xfc7: 0x1d99, 0xfc8: 0x1da8, 0xfc9: 0x1db7, 0xfca: 0x1dbc, 0xfcb: 0x1dc1, + 0xfcc: 0x1dc6, 0xfcd: 0x1dcb, 0xfce: 0x1dda, 0xfcf: 0x1de9, 0xfd0: 0x1dee, 0xfd1: 0x1df3, + 0xfd2: 0x1e02, 0xfd3: 0x1e11, 0xfd4: 0x1e16, 0xfd5: 0x1e1b, 0xfd6: 0x1e20, 0xfd7: 0x1e2f, + 0xfd8: 0x1e34, 0xfd9: 0x1e43, 0xfda: 0x1e48, 0xfdb: 0x1e4d, 0xfdc: 0x1e5c, 0xfdd: 0x1e61, + 0xfde: 0x1e66, 0xfdf: 0x1e70, 0xfe0: 0x1eac, 0xfe1: 0x1ebb, 0xfe2: 0x1eca, 0xfe3: 0x1ecf, + 0xfe4: 0x1ed4, 0xfe5: 0x1ede, 0xfe6: 0x1eed, 0xfe7: 0x1ef2, 0xfe8: 0x1f01, 0xfe9: 0x1f06, + 0xfea: 0x1f0b, 0xfeb: 0x1f1a, 0xfec: 0x1f1f, 0xfed: 0x1f2e, 0xfee: 0x1f33, 0xfef: 0x1f38, + 0xff0: 0x1f3d, 0xff1: 0x1f42, 0xff2: 0x1f47, 0xff3: 0x1f4c, 0xff4: 0x1f51, 0xff5: 0x1f56, + 0xff6: 0x1f5b, 0xff7: 0x1f60, 0xff8: 0x1f65, 0xff9: 0x1f6a, 0xffa: 0x1f6f, 0xffb: 0x1f74, + 0xffc: 0x1f79, 0xffd: 0x1f7e, 0xffe: 0x1f83, 0xfff: 0x1f8d, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f92, 0x1001: 0x1f97, 0x1002: 0x1f9c, 0x1003: 0x1fa6, 0x1004: 0x1fab, 0x1005: 0x1fb5, + 0x1006: 0x1fba, 0x1007: 0x1fbf, 0x1008: 0x1fc4, 0x1009: 0x1fc9, 0x100a: 0x1fce, 0x100b: 0x1fd3, + 0x100c: 0x1fd8, 0x100d: 0x1fdd, 0x100e: 0x1fec, 0x100f: 0x1ffb, 0x1010: 0x2000, 0x1011: 0x2005, + 0x1012: 0x200a, 0x1013: 0x200f, 0x1014: 0x2014, 0x1015: 0x201e, 0x1016: 0x2023, 0x1017: 0x2028, + 0x1018: 0x2037, 0x1019: 0x2046, 0x101a: 0x204b, 0x101b: 0x4437, 0x101c: 0x443d, 0x101d: 0x4473, + 0x101e: 0x44ca, 0x101f: 0x44d1, 0x1020: 0x44d8, 0x1021: 0x44df, 0x1022: 0x44e6, 0x1023: 0x44ed, + 0x1024: 0x25cc, 0x1025: 0x25d3, 0x1026: 0x25da, 0x1027: 0x25e1, 0x1028: 0x25f6, 0x1029: 0x25fd, + 0x102a: 0x1d9e, 0x102b: 0x1da3, 0x102c: 0x1da8, 0x102d: 0x1dad, 0x102e: 0x1db7, 0x102f: 0x1dbc, + 0x1030: 0x1dd0, 0x1031: 0x1dd5, 0x1032: 0x1dda, 0x1033: 0x1ddf, 0x1034: 0x1de9, 0x1035: 0x1dee, + 0x1036: 0x1df8, 0x1037: 0x1dfd, 0x1038: 0x1e02, 0x1039: 0x1e07, 0x103a: 0x1e11, 0x103b: 0x1e16, + 0x103c: 0x1f42, 0x103d: 0x1f47, 0x103e: 0x1f56, 0x103f: 0x1f5b, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f60, 0x1041: 0x1f74, 0x1042: 0x1f79, 0x1043: 0x1f7e, 0x1044: 0x1f83, 0x1045: 0x1f9c, + 0x1046: 0x1fa6, 0x1047: 0x1fab, 0x1048: 
0x1fb0, 0x1049: 0x1fc4, 0x104a: 0x1fe2, 0x104b: 0x1fe7, + 0x104c: 0x1fec, 0x104d: 0x1ff1, 0x104e: 0x1ffb, 0x104f: 0x2000, 0x1050: 0x4473, 0x1051: 0x202d, + 0x1052: 0x2032, 0x1053: 0x2037, 0x1054: 0x203c, 0x1055: 0x2046, 0x1056: 0x204b, 0x1057: 0x25b7, + 0x1058: 0x25be, 0x1059: 0x25c5, 0x105a: 0x25da, 0x105b: 0x25e8, 0x105c: 0x1d8f, 0x105d: 0x1d94, + 0x105e: 0x1d99, 0x105f: 0x1da8, 0x1060: 0x1db2, 0x1061: 0x1dc1, 0x1062: 0x1dc6, 0x1063: 0x1dcb, + 0x1064: 0x1dda, 0x1065: 0x1de4, 0x1066: 0x1e02, 0x1067: 0x1e1b, 0x1068: 0x1e20, 0x1069: 0x1e2f, + 0x106a: 0x1e34, 0x106b: 0x1e43, 0x106c: 0x1e4d, 0x106d: 0x1e5c, 0x106e: 0x1e61, 0x106f: 0x1e66, + 0x1070: 0x1e70, 0x1071: 0x1eac, 0x1072: 0x1eb1, 0x1073: 0x1ebb, 0x1074: 0x1eca, 0x1075: 0x1ecf, + 0x1076: 0x1ed4, 0x1077: 0x1ede, 0x1078: 0x1eed, 0x1079: 0x1f01, 0x107a: 0x1f06, 0x107b: 0x1f0b, + 0x107c: 0x1f1a, 0x107d: 0x1f1f, 0x107e: 0x1f2e, 0x107f: 0x1f33, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f38, 0x1081: 0x1f3d, 0x1082: 0x1f4c, 0x1083: 0x1f51, 0x1084: 0x1f65, 0x1085: 0x1f6a, + 0x1086: 0x1f6f, 0x1087: 0x1f74, 0x1088: 0x1f79, 0x1089: 0x1f8d, 0x108a: 0x1f92, 0x108b: 0x1f97, + 0x108c: 0x1f9c, 0x108d: 0x1fa1, 0x108e: 0x1fb5, 0x108f: 0x1fba, 0x1090: 0x1fbf, 0x1091: 0x1fc4, + 0x1092: 0x1fd3, 0x1093: 0x1fd8, 0x1094: 0x1fdd, 0x1095: 0x1fec, 0x1096: 0x1ff6, 0x1097: 0x2005, + 0x1098: 0x200a, 0x1099: 0x4467, 0x109a: 0x201e, 0x109b: 0x2023, 0x109c: 0x2028, 0x109d: 0x2037, + 0x109e: 0x2041, 0x109f: 0x25da, 0x10a0: 0x25e8, 0x10a1: 0x1da8, 0x10a2: 0x1db2, 0x10a3: 0x1dda, + 0x10a4: 0x1de4, 0x10a5: 0x1e02, 0x10a6: 0x1e0c, 0x10a7: 0x1e70, 0x10a8: 0x1e75, 0x10a9: 0x1e98, + 0x10aa: 0x1e9d, 0x10ab: 0x1f74, 0x10ac: 0x1f79, 0x10ad: 0x1f9c, 0x10ae: 0x1fec, 0x10af: 0x1ff6, + 0x10b0: 0x2037, 0x10b1: 0x2041, 0x10b2: 0x451b, 0x10b3: 0x4523, 0x10b4: 0x452b, 0x10b5: 0x1ef7, + 0x10b6: 0x1efc, 0x10b7: 0x1f10, 0x10b8: 0x1f15, 0x10b9: 0x1f24, 0x10ba: 0x1f29, 0x10bb: 0x1e7a, + 0x10bc: 0x1e7f, 0x10bd: 0x1ea2, 0x10be: 0x1ea7, 0x10bf: 0x1e39, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e3e, 0x10c1: 0x1e25, 0x10c2: 0x1e2a, 0x10c3: 0x1e52, 0x10c4: 0x1e57, 0x10c5: 0x1ec0, + 0x10c6: 0x1ec5, 0x10c7: 0x1ee3, 0x10c8: 0x1ee8, 0x10c9: 0x1e84, 0x10ca: 0x1e89, 0x10cb: 0x1e8e, + 0x10cc: 0x1e98, 0x10cd: 0x1e93, 0x10ce: 0x1e6b, 0x10cf: 0x1eb6, 0x10d0: 0x1ed9, 0x10d1: 0x1ef7, + 0x10d2: 0x1efc, 0x10d3: 0x1f10, 0x10d4: 0x1f15, 0x10d5: 0x1f24, 0x10d6: 0x1f29, 0x10d7: 0x1e7a, + 0x10d8: 0x1e7f, 0x10d9: 0x1ea2, 0x10da: 0x1ea7, 0x10db: 0x1e39, 0x10dc: 0x1e3e, 0x10dd: 0x1e25, + 0x10de: 0x1e2a, 0x10df: 0x1e52, 0x10e0: 0x1e57, 0x10e1: 0x1ec0, 0x10e2: 0x1ec5, 0x10e3: 0x1ee3, + 0x10e4: 0x1ee8, 0x10e5: 0x1e84, 0x10e6: 0x1e89, 0x10e7: 0x1e8e, 0x10e8: 0x1e98, 0x10e9: 0x1e93, + 0x10ea: 0x1e6b, 0x10eb: 0x1eb6, 0x10ec: 0x1ed9, 0x10ed: 0x1e84, 0x10ee: 0x1e89, 0x10ef: 0x1e8e, + 0x10f0: 0x1e98, 0x10f1: 0x1e75, 0x10f2: 0x1e9d, 0x10f3: 0x1ef2, 0x10f4: 0x1e5c, 0x10f5: 0x1e61, + 0x10f6: 0x1e66, 0x10f7: 0x1e84, 0x10f8: 0x1e89, 0x10f9: 0x1e8e, 0x10fa: 0x1ef2, 0x10fb: 0x1f01, + 0x10fc: 0x441f, 0x10fd: 0x441f, + // Block 0x44, offset 0x1100 + 0x1110: 0x2317, 0x1111: 0x232c, + 0x1112: 0x232c, 0x1113: 0x2333, 0x1114: 0x233a, 0x1115: 0x234f, 0x1116: 0x2356, 0x1117: 0x235d, + 0x1118: 0x2380, 0x1119: 0x2380, 0x111a: 0x23a3, 0x111b: 0x239c, 0x111c: 0x23b8, 0x111d: 0x23aa, + 0x111e: 0x23b1, 0x111f: 0x23d4, 0x1120: 0x23d4, 0x1121: 0x23cd, 0x1122: 0x23db, 0x1123: 0x23db, + 0x1124: 0x2405, 0x1125: 0x2405, 0x1126: 0x2421, 0x1127: 0x23e9, 0x1128: 0x23e9, 0x1129: 0x23e2, + 0x112a: 0x23f7, 0x112b: 0x23f7, 0x112c: 0x23fe, 0x112d: 0x23fe, 
0x112e: 0x2428, 0x112f: 0x2436, + 0x1130: 0x2436, 0x1131: 0x243d, 0x1132: 0x243d, 0x1133: 0x2444, 0x1134: 0x244b, 0x1135: 0x2452, + 0x1136: 0x2459, 0x1137: 0x2459, 0x1138: 0x2460, 0x1139: 0x246e, 0x113a: 0x247c, 0x113b: 0x2475, + 0x113c: 0x2483, 0x113d: 0x2483, 0x113e: 0x2498, 0x113f: 0x249f, + // Block 0x45, offset 0x1140 + 0x1140: 0x24d0, 0x1141: 0x24de, 0x1142: 0x24d7, 0x1143: 0x24bb, 0x1144: 0x24bb, 0x1145: 0x24e5, + 0x1146: 0x24e5, 0x1147: 0x24ec, 0x1148: 0x24ec, 0x1149: 0x2516, 0x114a: 0x251d, 0x114b: 0x2524, + 0x114c: 0x24fa, 0x114d: 0x2508, 0x114e: 0x252b, 0x114f: 0x2532, + 0x1152: 0x2501, 0x1153: 0x2586, 0x1154: 0x258d, 0x1155: 0x2563, 0x1156: 0x256a, 0x1157: 0x254e, + 0x1158: 0x254e, 0x1159: 0x2555, 0x115a: 0x257f, 0x115b: 0x2578, 0x115c: 0x25a2, 0x115d: 0x25a2, + 0x115e: 0x2310, 0x115f: 0x2325, 0x1160: 0x231e, 0x1161: 0x2348, 0x1162: 0x2341, 0x1163: 0x236b, + 0x1164: 0x2364, 0x1165: 0x238e, 0x1166: 0x2372, 0x1167: 0x2387, 0x1168: 0x23bf, 0x1169: 0x240c, + 0x116a: 0x23f0, 0x116b: 0x242f, 0x116c: 0x24c9, 0x116d: 0x24f3, 0x116e: 0x259b, 0x116f: 0x2594, + 0x1170: 0x25a9, 0x1171: 0x2540, 0x1172: 0x24a6, 0x1173: 0x2571, 0x1174: 0x2498, 0x1175: 0x24d0, + 0x1176: 0x2467, 0x1177: 0x24b4, 0x1178: 0x2547, 0x1179: 0x2539, 0x117a: 0x24c2, 0x117b: 0x24ad, + 0x117c: 0x24c2, 0x117d: 0x2547, 0x117e: 0x2379, 0x117f: 0x2395, + // Block 0x46, offset 0x1180 + 0x1180: 0x250f, 0x1181: 0x248a, 0x1182: 0x2309, 0x1183: 0x24ad, 0x1184: 0x2452, 0x1185: 0x2421, + 0x1186: 0x23c6, 0x1187: 0x255c, + 0x11b0: 0x241a, 0x11b1: 0x2491, 0x11b2: 0x27cc, 0x11b3: 0x27c3, 0x11b4: 0x27f9, 0x11b5: 0x27e7, + 0x11b6: 0x27d5, 0x11b7: 0x27f0, 0x11b8: 0x2802, 0x11b9: 0x2413, 0x11ba: 0x2c89, 0x11bb: 0x2b09, + 0x11bc: 0x27de, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0486, + 0x11d2: 0x048a, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04c2, + 0x11d8: 0x04c6, 0x11d9: 0x1b62, + 0x11e0: 0x8133, 0x11e1: 0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x1876, 0x11f1: 0x0446, 0x11f2: 0x0442, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04ba, 0x11fa: 0x04be, 0x11fb: 0x04ae, + 0x11fc: 0x04b2, 0x11fd: 0x0496, 0x11fe: 0x049a, 0x11ff: 0x048e, + // Block 0x48, offset 0x1200 + 0x1200: 0x0492, 0x1201: 0x049e, 0x1202: 0x04a2, 0x1203: 0x04a6, 0x1204: 0x04aa, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4280, 0x120a: 0x4280, 0x120b: 0x4280, + 0x120c: 0x4280, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0486, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0446, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04ba, + 0x121e: 0x04be, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42c1, 0x1231: 0x4443, 0x1232: 0x42c6, 0x1234: 0x42cb, + 0x1236: 0x42d0, 0x1237: 0x4449, 0x1238: 0x42d5, 0x1239: 0x444f, 0x123a: 0x42da, 0x123b: 0x4455, + 0x123c: 0x42df, 0x123d: 0x445b, 0x123e: 0x42e4, 0x123f: 0x4461, + // Block 0x49, offset 0x1240 + 0x1240: 0x0239, 0x1241: 0x4425, 0x1242: 0x4425, 0x1243: 0x442b, 0x1244: 0x442b, 0x1245: 0x446d, + 0x1246: 0x446d, 0x1247: 0x4431, 0x1248: 0x4431, 0x1249: 0x4479, 
0x124a: 0x4479, 0x124b: 0x4479, + 0x124c: 0x4479, 0x124d: 0x023c, 0x124e: 0x023c, 0x124f: 0x023f, 0x1250: 0x023f, 0x1251: 0x023f, + 0x1252: 0x023f, 0x1253: 0x0242, 0x1254: 0x0242, 0x1255: 0x0245, 0x1256: 0x0245, 0x1257: 0x0245, + 0x1258: 0x0245, 0x1259: 0x0248, 0x125a: 0x0248, 0x125b: 0x0248, 0x125c: 0x0248, 0x125d: 0x024b, + 0x125e: 0x024b, 0x125f: 0x024b, 0x1260: 0x024b, 0x1261: 0x024e, 0x1262: 0x024e, 0x1263: 0x024e, + 0x1264: 0x024e, 0x1265: 0x0251, 0x1266: 0x0251, 0x1267: 0x0251, 0x1268: 0x0251, 0x1269: 0x0254, + 0x126a: 0x0254, 0x126b: 0x0257, 0x126c: 0x0257, 0x126d: 0x025a, 0x126e: 0x025a, 0x126f: 0x025d, + 0x1270: 0x025d, 0x1271: 0x0260, 0x1272: 0x0260, 0x1273: 0x0260, 0x1274: 0x0260, 0x1275: 0x0263, + 0x1276: 0x0263, 0x1277: 0x0263, 0x1278: 0x0263, 0x1279: 0x0266, 0x127a: 0x0266, 0x127b: 0x0266, + 0x127c: 0x0266, 0x127d: 0x0269, 0x127e: 0x0269, 0x127f: 0x0269, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0269, 0x1281: 0x026c, 0x1282: 0x026c, 0x1283: 0x026c, 0x1284: 0x026c, 0x1285: 0x026f, + 0x1286: 0x026f, 0x1287: 0x026f, 0x1288: 0x026f, 0x1289: 0x0272, 0x128a: 0x0272, 0x128b: 0x0272, + 0x128c: 0x0272, 0x128d: 0x0275, 0x128e: 0x0275, 0x128f: 0x0275, 0x1290: 0x0275, 0x1291: 0x0278, + 0x1292: 0x0278, 0x1293: 0x0278, 0x1294: 0x0278, 0x1295: 0x027b, 0x1296: 0x027b, 0x1297: 0x027b, + 0x1298: 0x027b, 0x1299: 0x027e, 0x129a: 0x027e, 0x129b: 0x027e, 0x129c: 0x027e, 0x129d: 0x0281, + 0x129e: 0x0281, 0x129f: 0x0281, 0x12a0: 0x0281, 0x12a1: 0x0284, 0x12a2: 0x0284, 0x12a3: 0x0284, + 0x12a4: 0x0284, 0x12a5: 0x0287, 0x12a6: 0x0287, 0x12a7: 0x0287, 0x12a8: 0x0287, 0x12a9: 0x028a, + 0x12aa: 0x028a, 0x12ab: 0x028a, 0x12ac: 0x028a, 0x12ad: 0x028d, 0x12ae: 0x028d, 0x12af: 0x0290, + 0x12b0: 0x0290, 0x12b1: 0x0293, 0x12b2: 0x0293, 0x12b3: 0x0293, 0x12b4: 0x0293, 0x12b5: 0x2e17, + 0x12b6: 0x2e17, 0x12b7: 0x2e1f, 0x12b8: 0x2e1f, 0x12b9: 0x2e27, 0x12ba: 0x2e27, 0x12bb: 0x1f88, + 0x12bc: 0x1f88, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x047a, 0x12e0: 0x047e, 0x12e1: 0x048a, 0x12e2: 0x049e, 0x12e3: 0x04a2, + 0x12e4: 0x0486, 0x12e5: 0x05ae, 0x12e6: 0x05a6, 0x12e7: 0x04ca, 0x12e8: 0x04d2, 0x12e9: 0x04da, + 0x12ea: 0x04e2, 0x12eb: 0x04ea, 0x12ec: 0x056e, 0x12ed: 0x0576, 0x12ee: 0x057e, 0x12ef: 0x0522, + 0x12f0: 0x05b2, 0x12f1: 0x04ce, 0x12f2: 0x04d6, 0x12f3: 0x04de, 0x12f4: 0x04e6, 0x12f5: 0x04ee, + 0x12f6: 0x04f2, 0x12f7: 0x04f6, 0x12f8: 0x04fa, 0x12f9: 0x04fe, 0x12fa: 0x0502, 0x12fb: 0x0506, + 0x12fc: 0x050a, 0x12fd: 0x050e, 0x12fe: 0x0512, 0x12ff: 0x0516, + // Block 0x4c, offset 0x1300 + 0x1300: 0x051a, 0x1301: 0x051e, 0x1302: 0x0526, 0x1303: 0x052a, 0x1304: 0x052e, 0x1305: 0x0532, + 0x1306: 0x0536, 0x1307: 0x053a, 0x1308: 0x053e, 0x1309: 0x0542, 0x130a: 0x0546, 0x130b: 0x054a, + 0x130c: 0x054e, 0x130d: 0x0552, 0x130e: 0x0556, 0x130f: 0x055a, 0x1310: 0x055e, 0x1311: 0x0562, + 0x1312: 0x0566, 0x1313: 0x056a, 0x1314: 0x0572, 0x1315: 0x057a, 0x1316: 0x0582, 0x1317: 0x0586, + 0x1318: 0x058a, 0x1319: 0x058e, 0x131a: 0x0592, 0x131b: 0x0596, 0x131c: 0x059a, 0x131d: 0x05aa, + 0x131e: 0x4a8f, 0x131f: 0x4a95, 0x1320: 
0x03c6, 0x1321: 0x0316, 0x1322: 0x031a, 0x1323: 0x4a52, + 0x1324: 0x031e, 0x1325: 0x4a58, 0x1326: 0x4a5e, 0x1327: 0x0322, 0x1328: 0x0326, 0x1329: 0x032a, + 0x132a: 0x4a64, 0x132b: 0x4a6a, 0x132c: 0x4a70, 0x132d: 0x4a76, 0x132e: 0x4a7c, 0x132f: 0x4a82, + 0x1330: 0x036a, 0x1331: 0x032e, 0x1332: 0x0332, 0x1333: 0x0336, 0x1334: 0x037e, 0x1335: 0x033a, + 0x1336: 0x033e, 0x1337: 0x0342, 0x1338: 0x0346, 0x1339: 0x034a, 0x133a: 0x034e, 0x133b: 0x0352, + 0x133c: 0x0356, 0x133d: 0x035a, 0x133e: 0x035e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49d4, 0x1343: 0x49da, 0x1344: 0x49e0, 0x1345: 0x49e6, + 0x1346: 0x49ec, 0x1347: 0x49f2, 0x134a: 0x49f8, 0x134b: 0x49fe, + 0x134c: 0x4a04, 0x134d: 0x4a0a, 0x134e: 0x4a10, 0x134f: 0x4a16, + 0x1352: 0x4a1c, 0x1353: 0x4a22, 0x1354: 0x4a28, 0x1355: 0x4a2e, 0x1356: 0x4a34, 0x1357: 0x4a3a, + 0x135a: 0x4a40, 0x135b: 0x4a46, 0x135c: 0x4a4c, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x427b, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x044a, 0x1368: 0x046e, 0x1369: 0x044e, + 0x136a: 0x0452, 0x136b: 0x0456, 0x136c: 0x045a, 0x136d: 0x0472, 0x136e: 0x0476, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0176, 0x13a9: 0x0179, + 0x13aa: 0x017c, 0x13ab: 0x017f, 0x13ac: 0x0182, 0x13ad: 0x0185, 0x13ae: 0x0188, 0x13af: 0x018b, + 0x13b0: 0x018e, 0x13b1: 0x0191, 0x13b2: 0x0194, 0x13b3: 0x0197, 0x13b4: 0x019a, 0x13b5: 0x019d, + 0x13b6: 0x01a0, 0x13b7: 0x01a3, 0x13b8: 0x01a6, 0x13b9: 0x018b, 0x13ba: 0x01a9, 0x13bb: 0x01ac, + 0x13bc: 0x01af, 0x13bd: 0x01b2, 0x13be: 0x01b5, 0x13bf: 0x01b8, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0200, 0x13c1: 0x0203, 0x13c2: 0x0206, 0x13c3: 0x045e, 0x13c4: 0x01ca, 0x13c5: 0x01d3, + 0x13c6: 0x01d9, 0x13c7: 0x01fd, 0x13c8: 0x01ee, 0x13c9: 0x01eb, 0x13ca: 0x0209, 0x13cb: 0x020c, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x023c, 0x1401: 0x023f, 0x1402: 0x024b, 0x1403: 0x0254, 0x1405: 0x028d, + 0x1406: 0x025d, 0x1407: 0x024e, 0x1408: 0x026c, 0x1409: 0x0293, 0x140a: 0x027e, 0x140b: 0x0281, + 0x140c: 0x0284, 0x140d: 0x0287, 0x140e: 0x0260, 0x140f: 0x0272, 0x1410: 0x0278, 0x1411: 0x0266, + 0x1412: 0x027b, 0x1413: 0x025a, 0x1414: 0x0263, 
0x1415: 0x0245, 0x1416: 0x0248, 0x1417: 0x0251, + 0x1418: 0x0257, 0x1419: 0x0269, 0x141a: 0x026f, 0x141b: 0x0275, 0x141c: 0x0296, 0x141d: 0x02e7, + 0x141e: 0x02cf, 0x141f: 0x0299, 0x1421: 0x023f, 0x1422: 0x024b, + 0x1424: 0x028a, 0x1427: 0x024e, 0x1429: 0x0293, + 0x142a: 0x027e, 0x142b: 0x0281, 0x142c: 0x0284, 0x142d: 0x0287, 0x142e: 0x0260, 0x142f: 0x0272, + 0x1430: 0x0278, 0x1431: 0x0266, 0x1432: 0x027b, 0x1434: 0x0263, 0x1435: 0x0245, + 0x1436: 0x0248, 0x1437: 0x0251, 0x1439: 0x0269, 0x143b: 0x0275, + // Block 0x51, offset 0x1440 + 0x1442: 0x024b, + 0x1447: 0x024e, 0x1449: 0x0293, 0x144b: 0x0281, + 0x144d: 0x0287, 0x144e: 0x0260, 0x144f: 0x0272, 0x1451: 0x0266, + 0x1452: 0x027b, 0x1454: 0x0263, 0x1457: 0x0251, + 0x1459: 0x0269, 0x145b: 0x0275, 0x145d: 0x02e7, + 0x145f: 0x0299, 0x1461: 0x023f, 0x1462: 0x024b, + 0x1464: 0x028a, 0x1467: 0x024e, 0x1468: 0x026c, 0x1469: 0x0293, + 0x146a: 0x027e, 0x146c: 0x0284, 0x146d: 0x0287, 0x146e: 0x0260, 0x146f: 0x0272, + 0x1470: 0x0278, 0x1471: 0x0266, 0x1472: 0x027b, 0x1474: 0x0263, 0x1475: 0x0245, + 0x1476: 0x0248, 0x1477: 0x0251, 0x1479: 0x0269, 0x147a: 0x026f, 0x147b: 0x0275, + 0x147c: 0x0296, 0x147e: 0x02cf, + // Block 0x52, offset 0x1480 + 0x1480: 0x023c, 0x1481: 0x023f, 0x1482: 0x024b, 0x1483: 0x0254, 0x1484: 0x028a, 0x1485: 0x028d, + 0x1486: 0x025d, 0x1487: 0x024e, 0x1488: 0x026c, 0x1489: 0x0293, 0x148b: 0x0281, + 0x148c: 0x0284, 0x148d: 0x0287, 0x148e: 0x0260, 0x148f: 0x0272, 0x1490: 0x0278, 0x1491: 0x0266, + 0x1492: 0x027b, 0x1493: 0x025a, 0x1494: 0x0263, 0x1495: 0x0245, 0x1496: 0x0248, 0x1497: 0x0251, + 0x1498: 0x0257, 0x1499: 0x0269, 0x149a: 0x026f, 0x149b: 0x0275, + 0x14a1: 0x023f, 0x14a2: 0x024b, 0x14a3: 0x0254, + 0x14a5: 0x028d, 0x14a6: 0x025d, 0x14a7: 0x024e, 0x14a8: 0x026c, 0x14a9: 0x0293, + 0x14ab: 0x0281, 0x14ac: 0x0284, 0x14ad: 0x0287, 0x14ae: 0x0260, 0x14af: 0x0272, + 0x14b0: 0x0278, 0x14b1: 0x0266, 0x14b2: 0x027b, 0x14b3: 0x025a, 0x14b4: 0x0263, 0x14b5: 0x0245, + 0x14b6: 0x0248, 0x14b7: 0x0251, 0x14b8: 0x0257, 0x14b9: 0x0269, 0x14ba: 0x026f, 0x14bb: 0x0275, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x187c, 0x14c1: 0x1879, 0x14c2: 0x187f, 0x14c3: 0x18a3, 0x14c4: 0x18c7, 0x14c5: 0x18eb, + 0x14c6: 0x190f, 0x14c7: 0x1918, 0x14c8: 0x191e, 0x14c9: 0x1924, 0x14ca: 0x192a, + 0x14d0: 0x1a92, 0x14d1: 0x1a96, + 0x14d2: 0x1a9a, 0x14d3: 0x1a9e, 0x14d4: 0x1aa2, 0x14d5: 0x1aa6, 0x14d6: 0x1aaa, 0x14d7: 0x1aae, + 0x14d8: 0x1ab2, 0x14d9: 0x1ab6, 0x14da: 0x1aba, 0x14db: 0x1abe, 0x14dc: 0x1ac2, 0x14dd: 0x1ac6, + 0x14de: 0x1aca, 0x14df: 0x1ace, 0x14e0: 0x1ad2, 0x14e1: 0x1ad6, 0x14e2: 0x1ada, 0x14e3: 0x1ade, + 0x14e4: 0x1ae2, 0x14e5: 0x1ae6, 0x14e6: 0x1aea, 0x14e7: 0x1aee, 0x14e8: 0x1af2, 0x14e9: 0x1af6, + 0x14ea: 0x272b, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193f, 0x14ee: 0x19b7, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26b3, 0x1501: 0x26c8, 0x1502: 0x0506, + 0x1510: 0x0c12, 0x1511: 0x0a4a, + 0x1512: 0x08d6, 0x1513: 0x45db, 0x1514: 0x071e, 0x1515: 0x09f2, 0x1516: 0x1332, 0x1517: 0x0a02, + 0x1518: 0x072a, 0x1519: 0x0cda, 0x151a: 0x0eb2, 0x151b: 0x0cb2, 0x151c: 0x082a, 0x151d: 0x0b6e, + 0x151e: 0x07c2, 0x151f: 0x0cba, 0x1520: 0x0816, 0x1521: 0x111a, 0x1522: 0x0f86, 0x1523: 0x138e, + 0x1524: 0x09d6, 0x1525: 0x090e, 0x1526: 0x0e66, 0x1527: 0x0c1e, 0x1528: 0x0c4a, 0x1529: 0x06c2, + 0x152a: 
0x06ce, 0x152b: 0x140e, 0x152c: 0x0ade, 0x152d: 0x06ea, 0x152e: 0x08f2, 0x152f: 0x0c3e, + 0x1530: 0x13b6, 0x1531: 0x0c16, 0x1532: 0x1072, 0x1533: 0x10ae, 0x1534: 0x08fa, 0x1535: 0x0e46, + 0x1536: 0x0d0e, 0x1537: 0x0d0a, 0x1538: 0x0f9a, 0x1539: 0x082e, 0x153a: 0x095a, 0x153b: 0x1446, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fe, 0x1541: 0x06f6, 0x1542: 0x0706, 0x1543: 0x164a, 0x1544: 0x074a, 0x1545: 0x075a, + 0x1546: 0x075e, 0x1547: 0x0766, 0x1548: 0x076e, 0x1549: 0x0772, 0x154a: 0x077e, 0x154b: 0x0776, + 0x154c: 0x05b6, 0x154d: 0x165e, 0x154e: 0x0792, 0x154f: 0x0796, 0x1550: 0x079a, 0x1551: 0x07b6, + 0x1552: 0x164f, 0x1553: 0x05ba, 0x1554: 0x07a2, 0x1555: 0x07c2, 0x1556: 0x1659, 0x1557: 0x07d2, + 0x1558: 0x07da, 0x1559: 0x073a, 0x155a: 0x07e2, 0x155b: 0x07e6, 0x155c: 0x1834, 0x155d: 0x0802, + 0x155e: 0x080a, 0x155f: 0x05c2, 0x1560: 0x0822, 0x1561: 0x0826, 0x1562: 0x082e, 0x1563: 0x0832, + 0x1564: 0x05c6, 0x1565: 0x084a, 0x1566: 0x084e, 0x1567: 0x085a, 0x1568: 0x0866, 0x1569: 0x086a, + 0x156a: 0x086e, 0x156b: 0x0876, 0x156c: 0x0896, 0x156d: 0x089a, 0x156e: 0x08a2, 0x156f: 0x08b2, + 0x1570: 0x08ba, 0x1571: 0x08be, 0x1572: 0x08be, 0x1573: 0x08be, 0x1574: 0x166d, 0x1575: 0x0e96, + 0x1576: 0x08d2, 0x1577: 0x08da, 0x1578: 0x1672, 0x1579: 0x08e6, 0x157a: 0x08ee, 0x157b: 0x08f6, + 0x157c: 0x091e, 0x157d: 0x090a, 0x157e: 0x0916, 0x157f: 0x091a, + // Block 0x56, offset 0x1580 + 0x1580: 0x0922, 0x1581: 0x092a, 0x1582: 0x092e, 0x1583: 0x0936, 0x1584: 0x093e, 0x1585: 0x0942, + 0x1586: 0x0942, 0x1587: 0x094a, 0x1588: 0x0952, 0x1589: 0x0956, 0x158a: 0x0962, 0x158b: 0x0986, + 0x158c: 0x096a, 0x158d: 0x098a, 0x158e: 0x096e, 0x158f: 0x0976, 0x1590: 0x080e, 0x1591: 0x09d2, + 0x1592: 0x099a, 0x1593: 0x099e, 0x1594: 0x09a2, 0x1595: 0x0996, 0x1596: 0x09aa, 0x1597: 0x09a6, + 0x1598: 0x09be, 0x1599: 0x1677, 0x159a: 0x09da, 0x159b: 0x09de, 0x159c: 0x09e6, 0x159d: 0x09f2, + 0x159e: 0x09fa, 0x159f: 0x0a16, 0x15a0: 0x167c, 0x15a1: 0x1681, 0x15a2: 0x0a22, 0x15a3: 0x0a26, + 0x15a4: 0x0a2a, 0x15a5: 0x0a1e, 0x15a6: 0x0a32, 0x15a7: 0x05ca, 0x15a8: 0x05ce, 0x15a9: 0x0a3a, + 0x15aa: 0x0a42, 0x15ab: 0x0a42, 0x15ac: 0x1686, 0x15ad: 0x0a5e, 0x15ae: 0x0a62, 0x15af: 0x0a66, + 0x15b0: 0x0a6e, 0x15b1: 0x168b, 0x15b2: 0x0a76, 0x15b3: 0x0a7a, 0x15b4: 0x0b52, 0x15b5: 0x0a82, + 0x15b6: 0x05d2, 0x15b7: 0x0a8e, 0x15b8: 0x0a9e, 0x15b9: 0x0aaa, 0x15ba: 0x0aa6, 0x15bb: 0x1695, + 0x15bc: 0x0ab2, 0x15bd: 0x169a, 0x15be: 0x0abe, 0x15bf: 0x0aba, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0ac2, 0x15c1: 0x0ad2, 0x15c2: 0x0ad6, 0x15c3: 0x05d6, 0x15c4: 0x0ae6, 0x15c5: 0x0aee, + 0x15c6: 0x0af2, 0x15c7: 0x0af6, 0x15c8: 0x05da, 0x15c9: 0x169f, 0x15ca: 0x05de, 0x15cb: 0x0b12, + 0x15cc: 0x0b16, 0x15cd: 0x0b1a, 0x15ce: 0x0b22, 0x15cf: 0x1866, 0x15d0: 0x0b3a, 0x15d1: 0x16a9, + 0x15d2: 0x16a9, 0x15d3: 0x11da, 0x15d4: 0x0b4a, 0x15d5: 0x0b4a, 0x15d6: 0x05e2, 0x15d7: 0x16cc, + 0x15d8: 0x179e, 0x15d9: 0x0b5a, 0x15da: 0x0b62, 0x15db: 0x05e6, 0x15dc: 0x0b76, 0x15dd: 0x0b86, + 0x15de: 0x0b8a, 0x15df: 0x0b92, 0x15e0: 0x0ba2, 0x15e1: 0x05ee, 0x15e2: 0x05ea, 0x15e3: 0x0ba6, + 0x15e4: 0x16ae, 0x15e5: 0x0baa, 0x15e6: 0x0bbe, 0x15e7: 0x0bc2, 0x15e8: 0x0bc6, 0x15e9: 0x0bc2, + 0x15ea: 0x0bd2, 0x15eb: 0x0bd6, 0x15ec: 0x0be6, 0x15ed: 0x0bde, 0x15ee: 0x0be2, 0x15ef: 0x0bea, + 0x15f0: 0x0bee, 0x15f1: 0x0bf2, 0x15f2: 0x0bfe, 0x15f3: 0x0c02, 0x15f4: 0x0c1a, 0x15f5: 0x0c22, + 0x15f6: 0x0c32, 0x15f7: 0x0c46, 0x15f8: 0x16bd, 0x15f9: 0x0c42, 0x15fa: 0x0c36, 0x15fb: 0x0c4e, + 0x15fc: 0x0c56, 0x15fd: 0x0c6a, 0x15fe: 0x16c2, 0x15ff: 0x0c72, + // Block 0x58, offset 0x1600 + 
0x1600: 0x0c66, 0x1601: 0x0c5e, 0x1602: 0x05f2, 0x1603: 0x0c7a, 0x1604: 0x0c82, 0x1605: 0x0c8a, + 0x1606: 0x0c7e, 0x1607: 0x05f6, 0x1608: 0x0c9a, 0x1609: 0x0ca2, 0x160a: 0x16c7, 0x160b: 0x0cce, + 0x160c: 0x0d02, 0x160d: 0x0cde, 0x160e: 0x0602, 0x160f: 0x0cea, 0x1610: 0x05fe, 0x1611: 0x05fa, + 0x1612: 0x07c6, 0x1613: 0x07ca, 0x1614: 0x0d06, 0x1615: 0x0cee, 0x1616: 0x11ae, 0x1617: 0x0666, + 0x1618: 0x0d12, 0x1619: 0x0d16, 0x161a: 0x0d1a, 0x161b: 0x0d2e, 0x161c: 0x0d26, 0x161d: 0x16e0, + 0x161e: 0x0606, 0x161f: 0x0d42, 0x1620: 0x0d36, 0x1621: 0x0d52, 0x1622: 0x0d5a, 0x1623: 0x16ea, + 0x1624: 0x0d5e, 0x1625: 0x0d4a, 0x1626: 0x0d66, 0x1627: 0x060a, 0x1628: 0x0d6a, 0x1629: 0x0d6e, + 0x162a: 0x0d72, 0x162b: 0x0d7e, 0x162c: 0x16ef, 0x162d: 0x0d86, 0x162e: 0x060e, 0x162f: 0x0d92, + 0x1630: 0x16f4, 0x1631: 0x0d96, 0x1632: 0x0612, 0x1633: 0x0da2, 0x1634: 0x0dae, 0x1635: 0x0dba, + 0x1636: 0x0dbe, 0x1637: 0x16f9, 0x1638: 0x1690, 0x1639: 0x16fe, 0x163a: 0x0dde, 0x163b: 0x1703, + 0x163c: 0x0dea, 0x163d: 0x0df2, 0x163e: 0x0de2, 0x163f: 0x0dfe, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0e, 0x1641: 0x0e1e, 0x1642: 0x0e12, 0x1643: 0x0e16, 0x1644: 0x0e22, 0x1645: 0x0e26, + 0x1646: 0x1708, 0x1647: 0x0e0a, 0x1648: 0x0e3e, 0x1649: 0x0e42, 0x164a: 0x0616, 0x164b: 0x0e56, + 0x164c: 0x0e52, 0x164d: 0x170d, 0x164e: 0x0e36, 0x164f: 0x0e72, 0x1650: 0x1712, 0x1651: 0x1717, + 0x1652: 0x0e76, 0x1653: 0x0e8a, 0x1654: 0x0e86, 0x1655: 0x0e82, 0x1656: 0x061a, 0x1657: 0x0e8e, + 0x1658: 0x0e9e, 0x1659: 0x0e9a, 0x165a: 0x0ea6, 0x165b: 0x1654, 0x165c: 0x0eb6, 0x165d: 0x171c, + 0x165e: 0x0ec2, 0x165f: 0x1726, 0x1660: 0x0ed6, 0x1661: 0x0ee2, 0x1662: 0x0ef6, 0x1663: 0x172b, + 0x1664: 0x0f0a, 0x1665: 0x0f0e, 0x1666: 0x1730, 0x1667: 0x1735, 0x1668: 0x0f2a, 0x1669: 0x0f3a, + 0x166a: 0x061e, 0x166b: 0x0f3e, 0x166c: 0x0622, 0x166d: 0x0622, 0x166e: 0x0f56, 0x166f: 0x0f5a, + 0x1670: 0x0f62, 0x1671: 0x0f66, 0x1672: 0x0f72, 0x1673: 0x0626, 0x1674: 0x0f8a, 0x1675: 0x173a, + 0x1676: 0x0fa6, 0x1677: 0x173f, 0x1678: 0x0fb2, 0x1679: 0x16a4, 0x167a: 0x0fc2, 0x167b: 0x1744, + 0x167c: 0x1749, 0x167d: 0x174e, 0x167e: 0x062a, 0x167f: 0x062e, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ffa, 0x1681: 0x1758, 0x1682: 0x1753, 0x1683: 0x175d, 0x1684: 0x1762, 0x1685: 0x1002, + 0x1686: 0x1006, 0x1687: 0x1006, 0x1688: 0x100e, 0x1689: 0x0636, 0x168a: 0x1012, 0x168b: 0x063a, + 0x168c: 0x063e, 0x168d: 0x176c, 0x168e: 0x1026, 0x168f: 0x102e, 0x1690: 0x103a, 0x1691: 0x0642, + 0x1692: 0x1771, 0x1693: 0x105e, 0x1694: 0x1776, 0x1695: 0x177b, 0x1696: 0x107e, 0x1697: 0x1096, + 0x1698: 0x0646, 0x1699: 0x109e, 0x169a: 0x10a2, 0x169b: 0x10a6, 0x169c: 0x1780, 0x169d: 0x1785, + 0x169e: 0x1785, 0x169f: 0x10be, 0x16a0: 0x064a, 0x16a1: 0x178a, 0x16a2: 0x10d2, 0x16a3: 0x10d6, + 0x16a4: 0x064e, 0x16a5: 0x178f, 0x16a6: 0x10f2, 0x16a7: 0x0652, 0x16a8: 0x1102, 0x16a9: 0x10fa, + 0x16aa: 0x110a, 0x16ab: 0x1799, 0x16ac: 0x1122, 0x16ad: 0x0656, 0x16ae: 0x112e, 0x16af: 0x1136, + 0x16b0: 0x1146, 0x16b1: 0x065a, 0x16b2: 0x17a3, 0x16b3: 0x17a8, 0x16b4: 0x065e, 0x16b5: 0x17ad, + 0x16b6: 0x115e, 0x16b7: 0x17b2, 0x16b8: 0x116a, 0x16b9: 0x1176, 0x16ba: 0x117e, 0x16bb: 0x17b7, + 0x16bc: 0x17bc, 0x16bd: 0x1192, 0x16be: 0x17c1, 0x16bf: 0x119a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16d1, 0x16c1: 0x0662, 0x16c2: 0x11b2, 0x16c3: 0x11b6, 0x16c4: 0x066a, 0x16c5: 0x11ba, + 0x16c6: 0x0a36, 0x16c7: 0x17c6, 0x16c8: 0x17cb, 0x16c9: 0x16d6, 0x16ca: 0x16db, 0x16cb: 0x11da, + 0x16cc: 0x11de, 0x16cd: 0x13f6, 0x16ce: 0x066e, 0x16cf: 0x120a, 0x16d0: 0x1206, 0x16d1: 0x120e, + 0x16d2: 0x0842, 0x16d3: 
0x1212, 0x16d4: 0x1216, 0x16d5: 0x121a, 0x16d6: 0x1222, 0x16d7: 0x17d0, + 0x16d8: 0x121e, 0x16d9: 0x1226, 0x16da: 0x123a, 0x16db: 0x123e, 0x16dc: 0x122a, 0x16dd: 0x1242, + 0x16de: 0x1256, 0x16df: 0x126a, 0x16e0: 0x1236, 0x16e1: 0x124a, 0x16e2: 0x124e, 0x16e3: 0x1252, + 0x16e4: 0x17d5, 0x16e5: 0x17df, 0x16e6: 0x17da, 0x16e7: 0x0672, 0x16e8: 0x1272, 0x16e9: 0x1276, + 0x16ea: 0x127e, 0x16eb: 0x17f3, 0x16ec: 0x1282, 0x16ed: 0x17e4, 0x16ee: 0x0676, 0x16ef: 0x067a, + 0x16f0: 0x17e9, 0x16f1: 0x17ee, 0x16f2: 0x067e, 0x16f3: 0x12a2, 0x16f4: 0x12a6, 0x16f5: 0x12aa, + 0x16f6: 0x12ae, 0x16f7: 0x12ba, 0x16f8: 0x12b6, 0x16f9: 0x12c2, 0x16fa: 0x12be, 0x16fb: 0x12ce, + 0x16fc: 0x12c6, 0x16fd: 0x12ca, 0x16fe: 0x12d2, 0x16ff: 0x0682, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12da, 0x1701: 0x12de, 0x1702: 0x0686, 0x1703: 0x12ee, 0x1704: 0x12f2, 0x1705: 0x17f8, + 0x1706: 0x12fe, 0x1707: 0x1302, 0x1708: 0x068a, 0x1709: 0x130e, 0x170a: 0x05be, 0x170b: 0x17fd, + 0x170c: 0x1802, 0x170d: 0x068e, 0x170e: 0x0692, 0x170f: 0x133a, 0x1710: 0x1352, 0x1711: 0x136e, + 0x1712: 0x137e, 0x1713: 0x1807, 0x1714: 0x1392, 0x1715: 0x1396, 0x1716: 0x13ae, 0x1717: 0x13ba, + 0x1718: 0x1811, 0x1719: 0x1663, 0x171a: 0x13c6, 0x171b: 0x13c2, 0x171c: 0x13ce, 0x171d: 0x1668, + 0x171e: 0x13da, 0x171f: 0x13e6, 0x1720: 0x1816, 0x1721: 0x181b, 0x1722: 0x1426, 0x1723: 0x1432, + 0x1724: 0x143a, 0x1725: 0x1820, 0x1726: 0x143e, 0x1727: 0x146a, 0x1728: 0x1476, 0x1729: 0x147a, + 0x172a: 0x1472, 0x172b: 0x1486, 0x172c: 0x148a, 0x172d: 0x1825, 0x172e: 0x1496, 0x172f: 0x0696, + 0x1730: 0x149e, 0x1731: 0x182a, 0x1732: 0x069a, 0x1733: 0x14d6, 0x1734: 0x0ac6, 0x1735: 0x14ee, + 0x1736: 0x182f, 0x1737: 0x1839, 0x1738: 0x069e, 0x1739: 0x06a2, 0x173a: 0x1516, 0x173b: 0x183e, + 0x173c: 0x06a6, 0x173d: 0x1843, 0x173e: 0x152e, 0x173f: 0x152e, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1536, 0x1741: 0x1848, 0x1742: 0x154e, 0x1743: 0x06aa, 0x1744: 0x155e, 0x1745: 0x156a, + 0x1746: 0x1572, 0x1747: 0x157a, 0x1748: 0x06ae, 0x1749: 0x184d, 0x174a: 0x158e, 0x174b: 0x15aa, + 0x174c: 0x15b6, 0x174d: 0x06b2, 0x174e: 0x06b6, 0x174f: 0x15ba, 0x1750: 0x1852, 0x1751: 0x06ba, + 0x1752: 0x1857, 0x1753: 0x185c, 0x1754: 0x1861, 0x1755: 0x15de, 0x1756: 0x06be, 0x1757: 0x15f2, + 0x1758: 0x15fa, 0x1759: 0x15fe, 0x175a: 0x1606, 0x175b: 0x160e, 0x175c: 0x1616, 0x175d: 0x186b, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. 
+var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16b: 0x92, 0x16c: 0x0f, 0x16d: 0x93, 0x16e: 0x94, 0x16f: 0x95, + 0x170: 0x96, 0x173: 0x97, 0x174: 0x98, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x99, 0x181: 0x9a, 0x182: 0x9b, 0x183: 0x9c, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9d, 0x187: 0x9e, + 0x188: 0x9f, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa0, 0x18c: 0xa1, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa2, + 0x1a8: 0xa3, 0x1a9: 0xa4, 0x1ab: 0xa5, + 0x1b1: 0xa6, 0x1b3: 0xa7, 0x1b5: 0xa8, 0x1b7: 0xa9, + 0x1ba: 0xaa, 0x1bb: 0xab, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xac, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xad, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xae, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xaf, 0x21a: 0xb0, 0x21b: 0xb1, 0x21d: 0xb2, 0x21f: 0xb3, + 0x220: 0xb4, 0x223: 0xb5, 0x224: 0xb6, 0x225: 0xb7, 0x226: 0xb8, 0x227: 0xb9, + 0x22a: 0xba, 0x22b: 0xbb, 0x22d: 0xbc, 0x22f: 0xbd, + 0x230: 0xbe, 0x231: 0xbf, 0x232: 0xc0, 0x233: 0xc1, 0x234: 0xc2, 0x235: 0xc3, 0x236: 0xc4, 0x237: 0xbe, + 0x238: 0xbf, 0x239: 0xc0, 0x23a: 0xc1, 0x23b: 0xc2, 0x23c: 0xc3, 0x23d: 0xc4, 0x23e: 0xbe, 0x23f: 0xbf, + // Block 0x9, offset 0x240 + 0x240: 0xc0, 0x241: 0xc1, 0x242: 0xc2, 0x243: 0xc3, 0x244: 0xc4, 0x245: 0xbe, 0x246: 0xbf, 0x247: 0xc0, + 0x248: 0xc1, 0x249: 0xc2, 0x24a: 0xc3, 0x24b: 0xc4, 0x24c: 0xbe, 0x24d: 0xbf, 0x24e: 0xc0, 0x24f: 0xc1, + 0x250: 0xc2, 0x251: 0xc3, 0x252: 0xc4, 0x253: 0xbe, 0x254: 0xbf, 0x255: 0xc0, 0x256: 0xc1, 0x257: 0xc2, + 0x258: 0xc3, 0x259: 0xc4, 0x25a: 0xbe, 0x25b: 0xbf, 0x25c: 0xc0, 0x25d: 0xc1, 0x25e: 0xc2, 0x25f: 0xc3, + 0x260: 0xc4, 0x261: 0xbe, 0x262: 0xbf, 0x263: 0xc0, 0x264: 0xc1, 0x265: 0xc2, 0x266: 0xc3, 0x267: 0xc4, + 0x268: 0xbe, 0x269: 0xbf, 0x26a: 0xc0, 0x26b: 0xc1, 0x26c: 0xc2, 0x26d: 0xc3, 0x26e: 0xc4, 0x26f: 0xbe, + 0x270: 0xbf, 0x271: 0xc0, 0x272: 0xc1, 0x273: 0xc2, 0x274: 0xc3, 0x275: 0xc4, 0x276: 0xbe, 0x277: 0xbf, + 0x278: 0xc0, 0x279: 0xc1, 0x27a: 0xc2, 0x27b: 0xc3, 0x27c: 0xc4, 0x27d: 0xbe, 0x27e: 0xbf, 0x27f: 0xc0, + // Block 0xa, offset 0x280 + 0x280: 0xc1, 0x281: 0xc2, 0x282: 0xc3, 0x283: 0xc4, 0x284: 0xbe, 0x285: 0xbf, 
0x286: 0xc0, 0x287: 0xc1, + 0x288: 0xc2, 0x289: 0xc3, 0x28a: 0xc4, 0x28b: 0xbe, 0x28c: 0xbf, 0x28d: 0xc0, 0x28e: 0xc1, 0x28f: 0xc2, + 0x290: 0xc3, 0x291: 0xc4, 0x292: 0xbe, 0x293: 0xbf, 0x294: 0xc0, 0x295: 0xc1, 0x296: 0xc2, 0x297: 0xc3, + 0x298: 0xc4, 0x299: 0xbe, 0x29a: 0xbf, 0x29b: 0xc0, 0x29c: 0xc1, 0x29d: 0xc2, 0x29e: 0xc3, 0x29f: 0xc4, + 0x2a0: 0xbe, 0x2a1: 0xbf, 0x2a2: 0xc0, 0x2a3: 0xc1, 0x2a4: 0xc2, 0x2a5: 0xc3, 0x2a6: 0xc4, 0x2a7: 0xbe, + 0x2a8: 0xbf, 0x2a9: 0xc0, 0x2aa: 0xc1, 0x2ab: 0xc2, 0x2ac: 0xc3, 0x2ad: 0xc4, 0x2ae: 0xbe, 0x2af: 0xbf, + 0x2b0: 0xc0, 0x2b1: 0xc1, 0x2b2: 0xc2, 0x2b3: 0xc3, 0x2b4: 0xc4, 0x2b5: 0xbe, 0x2b6: 0xbf, 0x2b7: 0xc0, + 0x2b8: 0xc1, 0x2b9: 0xc2, 0x2ba: 0xc3, 0x2bb: 0xc4, 0x2bc: 0xbe, 0x2bd: 0xbf, 0x2be: 0xc0, 0x2bf: 0xc1, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc2, 0x2c1: 0xc3, 0x2c2: 0xc4, 0x2c3: 0xbe, 0x2c4: 0xbf, 0x2c5: 0xc0, 0x2c6: 0xc1, 0x2c7: 0xc2, + 0x2c8: 0xc3, 0x2c9: 0xc4, 0x2ca: 0xbe, 0x2cb: 0xbf, 0x2cc: 0xc0, 0x2cd: 0xc1, 0x2ce: 0xc2, 0x2cf: 0xc3, + 0x2d0: 0xc4, 0x2d1: 0xbe, 0x2d2: 0xbf, 0x2d3: 0xc0, 0x2d4: 0xc1, 0x2d5: 0xc2, 0x2d6: 0xc3, 0x2d7: 0xc4, + 0x2d8: 0xbe, 0x2d9: 0xbf, 0x2da: 0xc0, 0x2db: 0xc1, 0x2dc: 0xc2, 0x2dd: 0xc3, 0x2de: 0xc5, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc6, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc7, + 0x34b: 0xc8, 0x34d: 0xc9, + 0x368: 0xca, 0x36b: 0xcb, + 0x374: 0xcc, + 0x37a: 0xcd, 0x37d: 0xce, + // Block 0xe, offset 0x380 + 0x381: 0xcf, 0x382: 0xd0, 0x384: 0xd1, 0x385: 0xb8, 0x387: 0xd2, + 0x388: 0xd3, 0x38b: 0xd4, 0x38c: 0xd5, 0x38d: 0xd6, + 0x391: 0xd7, 0x392: 0xd8, 0x393: 0xd9, 0x396: 0xda, 0x397: 0xdb, + 0x398: 0xdc, 0x39a: 0xdd, 0x39c: 0xde, + 0x3a0: 0xdf, 0x3a4: 0xe0, 0x3a5: 0xe1, 0x3a7: 0xe2, + 0x3a8: 0xe3, 0x3a9: 0xe4, 0x3aa: 0xe5, + 0x3b0: 0xdc, 0x3b5: 0xe6, 0x3b6: 0xe7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe8, 0x3ec: 0xe9, + 0x3ff: 0xea, + // Block 0x10, offset 0x400 + 0x432: 0xeb, + // Block 0x11, offset 0x440 + 0x445: 0xec, 0x446: 0xed, 0x447: 0xee, + 0x449: 0xef, + 0x450: 0xf0, 0x451: 0xf1, 0x452: 0xf2, 0x453: 0xf3, 0x454: 0xf4, 0x455: 0xf5, 0x456: 0xf6, 0x457: 0xf7, + 0x458: 0xf8, 0x459: 0xf9, 0x45a: 0x4c, 0x45b: 0xfa, 0x45c: 0xfb, 0x45d: 0xfc, 0x45e: 0xfd, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xfe, 0x484: 0xe9, + 0x48b: 0xff, + 0x4a3: 0x100, 0x4a5: 0x101, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0x102, 0x4c6: 0x103, + 0x4c8: 0x52, 0x4c9: 0x104, + 0x4ef: 0x105, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 170 entries, 340 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x134, 0x136, 
0x13f, 0x141, 0x144, 0x146, 0x151, 0x15c, 0x16a, 0x178, 0x188, 0x196, 0x19d, 0x1a3, 0x1b2, 0x1b6, 0x1b8, 0x1bc, 0x1be, 0x1c1, 0x1c3, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1cf, 0x1d1, 0x1dd, 0x1e7, 0x1f1, 0x1f4, 0x1f8, 0x1fa, 0x1fc, 0x1fe, 0x201, 0x204, 0x206, 0x208, 0x20a, 0x20c, 0x212, 0x215, 0x21a, 0x21c, 0x223, 0x229, 0x22f, 0x237, 0x23d, 0x243, 0x249, 0x24d, 0x24f, 0x251, 0x253, 0x255, 0x25b, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x273, 0x27a, 0x27d, 0x280, 0x282, 0x285, 0x28d, 0x291, 0x298, 0x29b, 0x2a1, 0x2a3, 0x2a5, 0x2a8, 0x2aa, 0x2ad, 0x2b2, 0x2b4, 0x2b6, 0x2b8, 0x2ba, 0x2bc, 0x2bf, 0x2c1, 0x2c3, 0x2c5, 0x2c7, 0x2c9, 0x2d6, 0x2e0, 0x2e2, 0x2e4, 0x2e8, 0x2ed, 0x2f9, 0x2fe, 0x307, 0x30d, 0x312, 0x316, 0x31b, 0x31f, 0x32f, 0x33d, 0x34b, 0x359, 0x35f, 0x361, 0x363, 0x366, 0x371, 0x373, 0x37d} + +// nfkcSparseValues: 895 entries, 3580 bytes +var nfkcSparseValues = [895]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x428f, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x427b, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x4271, lo: 0xb4, hi: 0xb4}, + {value: 0x01df, lo: 0xb5, hi: 0xb5}, + {value: 0x42a8, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x2222, lo: 0xbc, hi: 0xbc}, + {value: 0x2216, lo: 0xbd, hi: 0xbd}, + {value: 0x22b8, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x4285, lo: 0x98, hi: 0x98}, + {value: 0x428a, lo: 0x99, hi: 0x9a}, + {value: 0x42ad, lo: 0x9b, hi: 0x9b}, + {value: 0x4276, lo: 0x9c, hi: 0x9c}, + {value: 0x4299, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x016a, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, 
hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + 
{value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2627, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x03}, + {value: 0x263c, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xdf + {value: 0x0000, lo: 0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x262e, lo: 0x9c, hi: 0x9c}, + {value: 0x2635, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe3 + {value: 0x0000, lo: 0x05}, + {value: 0x030e, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 
0x24, offset 0xe9 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x460b, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x4616, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xfa + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x108 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x0312, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x116 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x118 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x11a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x124 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x127 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x12a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x136 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + 
{value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x13f + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x141 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x144 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x146 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x151 + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x15c + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043e, lo: 0x91, hi: 0x91}, + {value: 0x42b2, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1876, lo: 0xa5, hi: 0xa5}, + {value: 0x1b62, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2697, lo: 0xb3, hi: 0xb3}, + {value: 0x280b, lo: 0xb4, hi: 0xb4}, + {value: 0x269e, lo: 0xb6, hi: 0xb6}, + {value: 0x2815, lo: 0xb7, hi: 0xb7}, + {value: 0x1870, lo: 0xbc, hi: 0xbc}, + {value: 0x4280, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x16a + {value: 0x0002, lo: 0x0d}, + {value: 0x1936, lo: 0x87, hi: 0x87}, + {value: 0x1933, lo: 0x88, hi: 0x88}, + {value: 0x1873, lo: 0x89, hi: 0x89}, + {value: 0x299b, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x046a, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x178 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x046a, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x199f, lo: 0xa8, hi: 0xa8}, + // Block 0x40, offset 0x188 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 
0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x41, offset 0x196 + {value: 0x0007, lo: 0x06}, + {value: 0x2186, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x42, offset 0x19d + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x43, offset 0x1a3 + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0x26a5, lo: 0xac, hi: 0xad}, + {value: 0x26ac, lo: 0xaf, hi: 0xaf}, + {value: 0x2829, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x44, offset 0x1b2 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x45, offset 0x1b6 + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x47, offset 0x1bc + {value: 0x0000, lo: 0x01}, + {value: 0x29a8, lo: 0x8c, hi: 0x8c}, + // Block 0x48, offset 0x1be + {value: 0x0266, lo: 0x02}, + {value: 0x1b92, lo: 0xb4, hi: 0xb4}, + {value: 0x1930, lo: 0xb5, hi: 0xb6}, + // Block 0x49, offset 0x1c1 + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x4a, offset 0x1c3 + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4b, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4c, offset 0x1c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0482, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4d, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4e, offset 0x1cd + {value: 0x0000, lo: 0x01}, + {value: 0x0dc6, lo: 0x9f, hi: 0x9f}, + // Block 0x4f, offset 0x1cf + {value: 0x0000, lo: 0x01}, + {value: 0x1632, lo: 0xb3, hi: 0xb3}, + // Block 0x50, offset 0x1d1 + {value: 0x0004, lo: 0x0b}, + {value: 0x159a, lo: 0x80, hi: 0x82}, + {value: 0x15b2, lo: 0x83, hi: 0x83}, + {value: 0x15ca, lo: 0x84, hi: 0x85}, + {value: 0x15da, lo: 0x86, hi: 0x89}, + {value: 0x15ee, lo: 0x8a, hi: 0x8c}, + {value: 0x1602, lo: 0x8d, hi: 0x8d}, + {value: 0x160a, lo: 0x8e, hi: 0x8e}, + {value: 0x1612, lo: 0x8f, hi: 0x90}, + {value: 0x161e, lo: 0x91, hi: 0x93}, + {value: 0x162e, lo: 0x94, hi: 0x94}, + {value: 0x1636, lo: 0x95, hi: 0x95}, + // Block 0x51, offset 0x1dd + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, 
+ {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x04b6, lo: 0xb6, hi: 0xb6}, + {value: 0x088a, lo: 0xb8, hi: 0xba}, + // Block 0x52, offset 0x1e7 + {value: 0x0006, lo: 0x09}, + {value: 0x0316, lo: 0xb1, hi: 0xb1}, + {value: 0x031a, lo: 0xb2, hi: 0xb2}, + {value: 0x4a52, lo: 0xb3, hi: 0xb3}, + {value: 0x031e, lo: 0xb4, hi: 0xb4}, + {value: 0x4a58, lo: 0xb5, hi: 0xb6}, + {value: 0x0322, lo: 0xb7, hi: 0xb7}, + {value: 0x0326, lo: 0xb8, hi: 0xb8}, + {value: 0x032a, lo: 0xb9, hi: 0xb9}, + {value: 0x4a64, lo: 0xba, hi: 0xbf}, + // Block 0x53, offset 0x1f1 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0212, lo: 0x9c, hi: 0x9c}, + {value: 0x0215, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x55, offset 0x1f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x56, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x163e, lo: 0xb0, hi: 0xb0}, + // Block 0x57, offset 0x1fc + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x58, offset 0x1fe + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x59, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5a, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5b, offset 0x206 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5c, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5d, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5e, offset 0x20c + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x5f, offset 0x212 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x60, offset 0x215 + {value: 0x0008, lo: 0x04}, + {value: 0x163a, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1646, lo: 0x9f, hi: 0x9f}, + {value: 0x015e, lo: 0xa9, hi: 0xa9}, + // Block 0x61, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x62, offset 0x21c + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x65, offset 0x22f + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 
0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x69, offset 0x249 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6a, offset 0x24d + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6b, offset 0x24f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6c, offset 0x251 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6d, offset 0x253 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6e, offset 0x255 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x25b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x70, offset 0x25e + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x72, offset 0x262 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x73, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x26b + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x75, offset 0x273 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x76, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x77, offset 0x27d + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x78, offset 0x280 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x79, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, 
hi: 0xbe}, + // Block 0x7a, offset 0x285 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7b, offset 0x28d + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7c, offset 0x291 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x298 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x80, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x2a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x82, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x83, offset 0x2aa + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x84, offset 0x2ad + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x85, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x86, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x87, offset 0x2b6 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x88, offset 0x2b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x89, offset 0x2ba + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8a, offset 0x2bc + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8b, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8c, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8d, offset 0x2c3 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x8e, offset 0x2c5 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x8f, offset 0x2c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x90, offset 0x2c9 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + 
{value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x91, offset 0x2d6 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x92, offset 0x2e0 + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x93, offset 0x2e2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x94, offset 0x2e4 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x95, offset 0x2e8 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x96, offset 0x2ed + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x97, offset 0x2f9 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x98, offset 0x2fe + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x99, offset 0x307 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9a, offset 0x30d + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9b, offset 0x312 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x9c, offset 0x316 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x9d, offset 0x31b + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x31f + {value: 0x0003, lo: 0x0f}, + {value: 0x01bb, lo: 0x80, hi: 0x80}, + {value: 0x0462, lo: 0x81, hi: 0x81}, + {value: 0x01be, lo: 0x82, hi: 0x9a}, + {value: 0x045e, lo: 0x9b, hi: 0x9b}, + {value: 0x01ca, lo: 0x9c, hi: 0x9c}, + {value: 0x01d3, lo: 0x9d, hi: 0x9d}, + {value: 0x01d9, lo: 0x9e, hi: 0x9e}, + {value: 0x01fd, lo: 0x9f, hi: 0x9f}, 
+ {value: 0x01ee, lo: 0xa0, hi: 0xa0}, + {value: 0x01eb, lo: 0xa1, hi: 0xa1}, + {value: 0x0176, lo: 0xa2, hi: 0xb2}, + {value: 0x018b, lo: 0xb3, hi: 0xb3}, + {value: 0x01a9, lo: 0xb4, hi: 0xba}, + {value: 0x0462, lo: 0xbb, hi: 0xbb}, + {value: 0x01be, lo: 0xbc, hi: 0xbf}, + // Block 0x9f, offset 0x32f + {value: 0x0003, lo: 0x0d}, + {value: 0x01ca, lo: 0x80, hi: 0x94}, + {value: 0x045e, lo: 0x95, hi: 0x95}, + {value: 0x01ca, lo: 0x96, hi: 0x96}, + {value: 0x01d3, lo: 0x97, hi: 0x97}, + {value: 0x01d9, lo: 0x98, hi: 0x98}, + {value: 0x01fd, lo: 0x99, hi: 0x99}, + {value: 0x01ee, lo: 0x9a, hi: 0x9a}, + {value: 0x01eb, lo: 0x9b, hi: 0x9b}, + {value: 0x0176, lo: 0x9c, hi: 0xac}, + {value: 0x018b, lo: 0xad, hi: 0xad}, + {value: 0x01a9, lo: 0xae, hi: 0xb4}, + {value: 0x0462, lo: 0xb5, hi: 0xb5}, + {value: 0x01be, lo: 0xb6, hi: 0xbf}, + // Block 0xa0, offset 0x33d + {value: 0x0003, lo: 0x0d}, + {value: 0x01dc, lo: 0x80, hi: 0x8e}, + {value: 0x045e, lo: 0x8f, hi: 0x8f}, + {value: 0x01ca, lo: 0x90, hi: 0x90}, + {value: 0x01d3, lo: 0x91, hi: 0x91}, + {value: 0x01d9, lo: 0x92, hi: 0x92}, + {value: 0x01fd, lo: 0x93, hi: 0x93}, + {value: 0x01ee, lo: 0x94, hi: 0x94}, + {value: 0x01eb, lo: 0x95, hi: 0x95}, + {value: 0x0176, lo: 0x96, hi: 0xa6}, + {value: 0x018b, lo: 0xa7, hi: 0xa7}, + {value: 0x01a9, lo: 0xa8, hi: 0xae}, + {value: 0x0462, lo: 0xaf, hi: 0xaf}, + {value: 0x01be, lo: 0xb0, hi: 0xbf}, + // Block 0xa1, offset 0x34b + {value: 0x0003, lo: 0x0d}, + {value: 0x01ee, lo: 0x80, hi: 0x88}, + {value: 0x045e, lo: 0x89, hi: 0x89}, + {value: 0x01ca, lo: 0x8a, hi: 0x8a}, + {value: 0x01d3, lo: 0x8b, hi: 0x8b}, + {value: 0x01d9, lo: 0x8c, hi: 0x8c}, + {value: 0x01fd, lo: 0x8d, hi: 0x8d}, + {value: 0x01ee, lo: 0x8e, hi: 0x8e}, + {value: 0x01eb, lo: 0x8f, hi: 0x8f}, + {value: 0x0176, lo: 0x90, hi: 0xa0}, + {value: 0x018b, lo: 0xa1, hi: 0xa1}, + {value: 0x01a9, lo: 0xa2, hi: 0xa8}, + {value: 0x0462, lo: 0xa9, hi: 0xa9}, + {value: 0x01be, lo: 0xaa, hi: 0xbf}, + // Block 0xa2, offset 0x359 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0xa3, offset 0x35f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa4, offset 0x361 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa5, offset 0x363 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa6, offset 0x366 + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1954, lo: 0x8a, hi: 0x8a}, + {value: 0x1987, lo: 0x8b, hi: 0x8b}, + {value: 0x19a2, lo: 0x8c, hi: 0x8c}, + {value: 0x19a8, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc6, lo: 0x8e, hi: 0x8e}, + {value: 0x19b4, lo: 0x8f, hi: 0x8f}, + {value: 0x197e, lo: 0xaa, hi: 0xaa}, + {value: 0x1981, lo: 0xab, hi: 0xab}, + {value: 0x1984, lo: 0xac, hi: 0xac}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x1942, lo: 0x90, hi: 0x90}, + // Block 0xa8, offset 0x373 + {value: 0x0028, lo: 0x09}, + {value: 0x286f, lo: 0x80, hi: 0x80}, + {value: 0x2833, lo: 0x81, hi: 0x81}, + {value: 0x283d, lo: 0x82, hi: 0x82}, + {value: 0x2851, lo: 0x83, hi: 0x84}, + {value: 0x285b, lo: 0x85, hi: 0x86}, + {value: 0x2847, lo: 0x87, hi: 0x87}, + {value: 0x2865, lo: 0x88, hi: 0x88}, + {value: 0x0b72, lo: 0x90, hi: 0x90}, + {value: 0x08ea, lo: 0x91, hi: 0x91}, + // Block 0xa9, offset 0x37d + {value: 
0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 
0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 55KB (56160 bytes) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 5c859677a..543942b9e 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.14
+// +build go1.14,!go1.16
 
 package width
 
diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go
new file mode 100644
index 000000000..804264ca6
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables13.0.0.go
@@ -0,0 +1,1351 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.16
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "13.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
+
+// widthValues: 105 blocks, 6720 entries, 13440 bytes
+// The third block is the zero block.
+var widthValues = [6720]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, + 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, + // Block 0x3e, offset 0xf80 + 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, + 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, + 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, + 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, + 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, + 0xfbc: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, + 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, + 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, + 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, + 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, + 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, + 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, + 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, + 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, + 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, + 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, + 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, + 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, + 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, + 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, + 0x106a: 0x4000, 0x106b: 0x4000, + // Block 0x42, offset 0x1080 + 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, + 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, + 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, + 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, + 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, + 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, + 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, + 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, + 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, + 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, + 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, + 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, + 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, + 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, + 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, + 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, + 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, + 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, + 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, + 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, + 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, + // Block 0x44, offset 0x1100 + 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, + 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, + 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, + 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, + 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, + 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, + 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, + 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, + 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, + 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, + 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, + // Block 0x45, offset 0x1140 + 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, + 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, + 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, + 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, + 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, + 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, + 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, + 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, + 0x117d: 0x2000, + // Block 0x46, offset 0x1180 + 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, + 0x11a4: 0x4000, + 0x11b0: 0x4000, 0x11b1: 0x4000, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, + 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, + 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, + 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, + 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, + 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, + 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, + 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, + 0x11f6: 0x4000, 0x11f7: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, + // Block 0x4a, offset 0x1280 
+ 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bf: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, + 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, + 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, + 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, + 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, + // Block 0x5c, offset 0x1700 + 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, + 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, + 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, + 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, + 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, + 0x173a: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1755: 0x4000, 0x1756: 0x4000, + 0x1764: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x182b: 0x4000, 0x182c: 0x4000, + 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, + // Block 0x61, offset 0x1840 + 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, + 0x1986: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, + 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, + 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, + 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, + 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, + 0x19b6: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, + 0x19d0: 0x4000, 0x19d1: 0x4000, + 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, + 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, + 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, + 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, + 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, + 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, + 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, + 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, + 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, + 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, + 0x1a3c: 0x2000, 0x1a3d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, + 0x265: 0x3c, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, + // Block 0xd, offset 0x340 + 0x37f: 0x44, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, + 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, + 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, + 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
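
As a rough illustration of the xor scheme described in the comment above (and not part of the generated table that follows), a minimal standalone Go sketch might apply a single 4-byte entry like this; the helper name applyInverse and the hard-coded entry value {0x01, 0xE0, 0x00, 0x00} are assumptions for the example only, taken from the E0 ^ A1 = 41 arithmetic in the comment rather than from the package's actual lookup code:

package main

import (
	"fmt"
	"unicode/utf8"
)

// applyInverse sketches how a 4-byte inverseData-style entry could be applied
// to a rune: the first byte is the length of the stored UTF-8 sequence, and
// the last stored byte has been xor-ed with the last byte of the input rune's
// UTF-8 encoding, so xor-ing again recovers the mapped rune.
func applyInverse(entry [4]byte, r rune) rune {
	n := int(entry[0]) // length of the stored (modified) UTF-8 sequence
	if n == 0 {
		return r // zero entry: no mapping
	}

	var src [4]byte
	sz := utf8.EncodeRune(src[:], r) // UTF-8 encoding of the input rune

	var dst [4]byte
	copy(dst[:], entry[1:1+n])
	dst[n-1] ^= src[sz-1] // undo the xor with the input's last UTF-8 byte

	out, _ := utf8.DecodeRune(dst[:n])
	return out
}

func main() {
	// Hypothetical entry from the comment above: 0xE0 ^ 0xA1 = 0x41,
	// so U+FF21 maps to U+0041, and the same entry handles U+FF22 -> U+0042.
	entry := [4]byte{0x01, 0xe0, 0x00, 0x00}
	fmt.Printf("%c -> %c\n", '\uFF21', applyInverse(entry, '\uFF21'))
	fmt.Printf("%c -> %c\n", '\uFF22', applyInverse(entry, '\uFF22'))
}

Running the sketch prints "Ａ -> A" and "Ｂ -> B", matching the mappings listed in the comment; the real table below is consumed by the width package's own transform code, not by this helper.
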
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 563f70429..a98fe7782 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -53,10 +53,9 @@ func Every(interval time.Duration) Limit { // // The methods AllowN, ReserveN, and WaitN consume n tokens. type Limiter struct { - limit Limit - burst int - mu sync.Mutex + limit Limit + burst int tokens float64 // last is the last time the limiter's tokens field was updated last time.Time @@ -76,6 +75,8 @@ func (lim *Limiter) Limit() Limit { // Burst values allow more events to happen at once. // A zero Burst allows no events, unless limit == Inf. func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() return lim.burst } @@ -196,7 +197,7 @@ func (lim *Limiter) Reserve() *Reservation { // ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. // The Limiter takes this Reservation into account when allowing future events. -// ReserveN returns false if n exceeds the Limiter's burst size. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: // r := lim.ReserveN(time.Now(), 1) // if !r.OK() { @@ -229,7 +230,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { lim.mu.Unlock() if n > burst && limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) } // Check if ctx is already cancelled select { @@ -359,6 +360,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. +// advance requires that lim.mu is held. func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { last := lim.last if now.Before(last) { diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 063d724cb..e79a53884 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/rpc/status.proto package status @@ -25,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" ) const ( @@ -61,7 +61,7 @@ type Status struct { Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. - Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } func (x *Status) Reset() { @@ -110,7 +110,7 @@ func (x *Status) GetMessage() string { return "" } -func (x *Status) GetDetails() []*any.Any { +func (x *Status) GetDetails() []*anypb.Any { if x != nil { return x.Details } @@ -154,8 +154,8 @@ func file_google_rpc_status_proto_rawDescGZIP() []byte { var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_rpc_status_proto_goTypes = []interface{}{ - (*Status)(nil), // 0: google.rpc.Status - (*any.Any)(nil), // 1: google.protobuf.Any + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_google_rpc_status_proto_depIdxs = []int32{ 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d952f09f3..d7d72918a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -21,6 +21,7 @@ package base import ( "context" "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -76,6 +77,9 @@ type baseBalancer struct { picker balancer.Picker v2Picker balancer.V2Picker config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { @@ -83,13 +87,23 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) } func (b *baseBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - if b.picker != nil { - b.picker = NewErrPicker(err) - } else { - b.v2Picker = NewErrPickerV2(err) - } + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
+ return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) } } @@ -99,6 +113,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { if grpclog.V(2) { grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) } + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { @@ -127,24 +147,30 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { return nil } +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - errPicker if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. -func (b *baseBalancer) regeneratePicker(err error) { +func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { if b.pickerBuilder != nil { b.picker = NewErrPicker(balancer.ErrTransientFailure) } else { - if err != nil { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err)) - } else { - // This means the last subchannel transition was not to - // TransientFailure (otherwise err must be set), but the - // aggregate state of the balancer is TransientFailure, meaning - // there are no other addresses. - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses"))) - } + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) } return } @@ -200,6 +226,9 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) + // Set or clear the last connection error accordingly. 
+ b.connErr = state.ConnectionError + // Regenerate picker when one of the following happens: // - this sc became ready from not-ready // - this sc became not-ready from ready @@ -207,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // - the aggregated state of balancer became non-TransientFailure from TransientFailure if (s == connectivity.Ready) != (oldS == connectivity.Ready) || (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker(state.ConnectionError) + b.regeneratePicker() } if b.picker != nil { diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..14aa6f20a --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..c8a0c3daa --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. 
+package passthrough + +import _ "google.golang.org/grpc/internal/resolver/passthrough" // import for side effects after package was moved diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index cf193820f..1a831b159 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.27.0" +const Version = "1.27.1" diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index c2f8f28f2..cab95a427 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" @@ -108,7 +108,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return errors.New("no support for proto1 MessageSets") } - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { return d.unmarshalAny(m, checkDelims) } @@ -538,14 +538,13 @@ Loop: return d.unexpectedTokenError(tok) } - name := tok.IdentName() - switch name { - case "key": + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } if key.IsValid() { - return d.newError(tok.Pos(), `map entry "key" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } val, err := d.unmarshalScalar(fd.MapKey()) if err != nil { @@ -553,14 +552,14 @@ Loop: } key = val.MapKey() - case "value": + case genid.MapEntry_Value_field_name: if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } } if pval.IsValid() { - return d.newError(tok.Pos(), `map entry "value" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } pval, err = unmarshalMapValue() if err != nil { @@ -597,13 +596,9 @@ Loop: func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { var typeURL string var bValue []byte - - // hasFields tracks which valid fields have been seen in the loop below in - // order to flag an error if there are duplicates or conflicts. It may - // contain the strings "type_url", "value" and "expanded". The literal - // "expanded" is used to indicate that the expanded form has been - // encountered already. 
- hasFields := map[string]bool{} + var seenTypeUrl bool + var seenValue bool + var isExpanded bool if checkDelims { tok, err := d.Read() @@ -642,12 +637,12 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch tok.IdentName() { - case "type_url": - if hasFields["type_url"] { - return d.newError(tok.Pos(), "duplicate Any type_url field") + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -657,15 +652,15 @@ Loop: var ok bool typeURL, ok = tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) } - hasFields["type_url"] = true + seenTypeUrl = true - case "value": - if hasFields["value"] { - return d.newError(tok.Pos(), "duplicate Any value field") + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -674,22 +669,22 @@ Loop: } s, ok := tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) } bValue = []byte(s) - hasFields["value"] = true + seenValue = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } case text.TypeName: - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "cannot have more than one type") } - if hasFields["type_url"] { + if seenTypeUrl { return d.newError(tok.Pos(), "conflict with type_url field") } typeURL = tok.TypeName() @@ -698,21 +693,21 @@ Loop: if err != nil { return err } - hasFields["expanded"] = true + isExpanded = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } } fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 41e5c773c..0877d71c5 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -14,8 +14,8 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" 
"google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/mapsort" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -162,7 +162,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // Handle Any expansion. - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { if e.marshalAny(m) { return nil } @@ -295,13 +295,13 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) e.StartMessage() defer e.EndMessage() - e.WriteName("key") + e.WriteName(string(genid.MapEntry_Key_field_name)) err = e.marshalSingular(key.Value(), fd.MapKey()) if err != nil { return false } - e.WriteName("value") + e.WriteName(string(genid.MapEntry_Value_field_name)) err = e.marshalSingular(val, fd.MapValue()) if err != nil { return false @@ -399,7 +399,7 @@ func (e encoder) marshalUnknown(b []byte) { func (e encoder) marshalAny(any pref.Message) bool { // Construct the embedded message. fds := any.Descriptor().Fields() - fdType := fds.ByNumber(fieldnum.Any_TypeUrl) + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) typeURL := any.Get(fdType).String() mt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { @@ -408,7 +408,7 @@ func (e encoder) marshalAny(any pref.Message) bool { m := mt.New().Interface() // Unmarshal bytes into embedded message. - fdValue := fds.ByNumber(fieldnum.Any_Value) + fdValue := fds.ByNumber(genid.Any_Value_field_number) value := any.Get(fdValue) err = proto.UnmarshalOptions{ AllowPartial: true, diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go deleted file mode 100644 index 74c5fef24..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Any. -const ( - Any_TypeUrl = 1 // optional string - Any_Value = 2 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go deleted file mode 100644 index 9a6b5f29b..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Api. -const ( - Api_Name = 1 // optional string - Api_Methods = 2 // repeated google.protobuf.Method - Api_Options = 3 // repeated google.protobuf.Option - Api_Version = 4 // optional string - Api_SourceContext = 5 // optional google.protobuf.SourceContext - Api_Mixins = 6 // repeated google.protobuf.Mixin - Api_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Method. 
-const ( - Method_Name = 1 // optional string - Method_RequestTypeUrl = 2 // optional string - Method_RequestStreaming = 3 // optional bool - Method_ResponseTypeUrl = 4 // optional string - Method_ResponseStreaming = 5 // optional bool - Method_Options = 6 // repeated google.protobuf.Option - Method_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Mixin. -const ( - Mixin_Name = 1 // optional string - Mixin_Root = 2 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go deleted file mode 100644 index 6e37b59e9..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto -) - -// Field numbers for google.protobuf.FileDescriptorProto. -const ( - FileDescriptorProto_Name = 1 // optional string - FileDescriptorProto_Package = 2 // optional string - FileDescriptorProto_Dependency = 3 // repeated string - FileDescriptorProto_PublicDependency = 10 // repeated int32 - FileDescriptorProto_WeakDependency = 11 // repeated int32 - FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto - FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto - FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto - FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto - FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions - FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo - FileDescriptorProto_Syntax = 12 // optional string -) - -// Field numbers for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name = 1 // optional string - DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto - DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto - DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange - DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto - DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions - DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange - DescriptorProto_ReservedName = 10 // repeated string -) - -// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. -const ( - DescriptorProto_ExtensionRange_Start = 1 // optional int32 - DescriptorProto_ExtensionRange_End = 2 // optional int32 - DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions -) - -// Field numbers for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start = 1 // optional int32 - DescriptorProto_ReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.ExtensionRangeOptions. 
-const ( - ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_Name = 1 // optional string - FieldDescriptorProto_Number = 3 // optional int32 - FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label - FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type - FieldDescriptorProto_TypeName = 6 // optional string - FieldDescriptorProto_Extendee = 2 // optional string - FieldDescriptorProto_DefaultValue = 7 // optional string - FieldDescriptorProto_OneofIndex = 9 // optional int32 - FieldDescriptorProto_JsonName = 10 // optional string - FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions - FieldDescriptorProto_Proto3Optional = 17 // optional bool -) - -// Field numbers for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name = 1 // optional string - OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions -) - -// Field numbers for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_Name = 1 // optional string - EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto - EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions - EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange - EnumDescriptorProto_ReservedName = 5 // repeated string -) - -// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32 - EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name = 1 // optional string - EnumValueDescriptorProto_Number = 2 // optional int32 - EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions -) - -// Field numbers for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name = 1 // optional string - ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto - ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions -) - -// Field numbers for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_Name = 1 // optional string - MethodDescriptorProto_InputType = 2 // optional string - MethodDescriptorProto_OutputType = 3 // optional string - MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions - MethodDescriptorProto_ClientStreaming = 5 // optional bool - MethodDescriptorProto_ServerStreaming = 6 // optional bool -) - -// Field numbers for google.protobuf.FileOptions. 
-const ( - FileOptions_JavaPackage = 1 // optional string - FileOptions_JavaOuterClassname = 8 // optional string - FileOptions_JavaMultipleFiles = 10 // optional bool - FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool - FileOptions_JavaStringCheckUtf8 = 27 // optional bool - FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode - FileOptions_GoPackage = 11 // optional string - FileOptions_CcGenericServices = 16 // optional bool - FileOptions_JavaGenericServices = 17 // optional bool - FileOptions_PyGenericServices = 18 // optional bool - FileOptions_PhpGenericServices = 42 // optional bool - FileOptions_Deprecated = 23 // optional bool - FileOptions_CcEnableArenas = 31 // optional bool - FileOptions_ObjcClassPrefix = 36 // optional string - FileOptions_CsharpNamespace = 37 // optional string - FileOptions_SwiftPrefix = 39 // optional string - FileOptions_PhpClassPrefix = 40 // optional string - FileOptions_PhpNamespace = 41 // optional string - FileOptions_PhpMetadataNamespace = 44 // optional string - FileOptions_RubyPackage = 45 // optional string - FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MessageOptions. -const ( - MessageOptions_MessageSetWireFormat = 1 // optional bool - MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool - MessageOptions_Deprecated = 3 // optional bool - MessageOptions_MapEntry = 7 // optional bool - MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldOptions. -const ( - FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType - FieldOptions_Packed = 2 // optional bool - FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType - FieldOptions_Lazy = 5 // optional bool - FieldOptions_Deprecated = 3 // optional bool - FieldOptions_Weak = 10 // optional bool - FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias = 2 // optional bool - EnumOptions_Deprecated = 3 // optional bool - EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_Deprecated = 1 // optional bool - EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated = 33 // optional bool - ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated = 33 // optional bool - MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel - MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.UninterpretedOption. 
-const ( - UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart - UninterpretedOption_IdentifierValue = 3 // optional string - UninterpretedOption_PositiveIntValue = 4 // optional uint64 - UninterpretedOption_NegativeIntValue = 5 // optional int64 - UninterpretedOption_DoubleValue = 6 // optional double - UninterpretedOption_StringValue = 7 // optional bytes - UninterpretedOption_AggregateValue = 8 // optional string -) - -// Field numbers for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart = 1 // required string - UninterpretedOption_NamePart_IsExtension = 2 // required bool -) - -// Field numbers for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location -) - -// Field numbers for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path = 1 // repeated int32 - SourceCodeInfo_Location_Span = 2 // repeated int32 - SourceCodeInfo_Location_LeadingComments = 3 // optional string - SourceCodeInfo_Location_TrailingComments = 4 // optional string - SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string -) - -// Field numbers for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation -) - -// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. -const ( - GeneratedCodeInfo_Annotation_Path = 1 // repeated int32 - GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string - GeneratedCodeInfo_Annotation_Begin = 3 // optional int32 - GeneratedCodeInfo_Annotation_End = 4 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go deleted file mode 100644 index e59788599..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldnum contains constants for field numbers of fields in messages -// declared in descriptor.proto and any of the well-known types. -package fieldnum diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go deleted file mode 100644 index 8816c7358..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Duration. -const ( - Duration_Seconds = 1 // optional int64 - Duration_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go deleted file mode 100644 index 7e3bfa27b..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. 
- -package fieldnum - -// Field numbers for google.protobuf.FieldMask. -const ( - FieldMask_Paths = 1 // repeated string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go deleted file mode 100644 index 241972b1f..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.SourceContext. -const ( - SourceContext_FileName = 1 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go deleted file mode 100644 index c460aab44..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Struct. -const ( - Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry -) - -// Field numbers for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_Key = 1 // optional string - Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value -) - -// Field numbers for google.protobuf.Value. -const ( - Value_NullValue = 1 // optional google.protobuf.NullValue - Value_NumberValue = 2 // optional double - Value_StringValue = 3 // optional string - Value_BoolValue = 4 // optional bool - Value_StructValue = 5 // optional google.protobuf.Struct - Value_ListValue = 6 // optional google.protobuf.ListValue -) - -// Field numbers for google.protobuf.ListValue. -const ( - ListValue_Values = 1 // repeated google.protobuf.Value -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go deleted file mode 100644 index b4346fba5..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Timestamp. -const ( - Timestamp_Seconds = 1 // optional int64 - Timestamp_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go deleted file mode 100644 index b392e9598..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Type. 
-const ( - Type_Name = 1 // optional string - Type_Fields = 2 // repeated google.protobuf.Field - Type_Oneofs = 3 // repeated string - Type_Options = 4 // repeated google.protobuf.Option - Type_SourceContext = 5 // optional google.protobuf.SourceContext - Type_Syntax = 6 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Field. -const ( - Field_Kind = 1 // optional google.protobuf.Field.Kind - Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality - Field_Number = 3 // optional int32 - Field_Name = 4 // optional string - Field_TypeUrl = 6 // optional string - Field_OneofIndex = 7 // optional int32 - Field_Packed = 8 // optional bool - Field_Options = 9 // repeated google.protobuf.Option - Field_JsonName = 10 // optional string - Field_DefaultValue = 11 // optional string -) - -// Field numbers for google.protobuf.Enum. -const ( - Enum_Name = 1 // optional string - Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue - Enum_Options = 3 // repeated google.protobuf.Option - Enum_SourceContext = 4 // optional google.protobuf.SourceContext - Enum_Syntax = 5 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.EnumValue. -const ( - EnumValue_Name = 1 // optional string - EnumValue_Number = 2 // optional int32 - EnumValue_Options = 3 // repeated google.protobuf.Option -) - -// Field numbers for google.protobuf.Option. -const ( - Option_Name = 1 // optional string - Option_Value = 2 // optional google.protobuf.Any -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go deleted file mode 100644 index 42f846a9f..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.DoubleValue. -const ( - DoubleValue_Value = 1 // optional double -) - -// Field numbers for google.protobuf.FloatValue. -const ( - FloatValue_Value = 1 // optional float -) - -// Field numbers for google.protobuf.Int64Value. -const ( - Int64Value_Value = 1 // optional int64 -) - -// Field numbers for google.protobuf.UInt64Value. -const ( - UInt64Value_Value = 1 // optional uint64 -) - -// Field numbers for google.protobuf.Int32Value. -const ( - Int32Value_Value = 1 // optional int32 -) - -// Field numbers for google.protobuf.UInt32Value. -const ( - UInt32Value_Value = 1 // optional uint32 -) - -// Field numbers for google.protobuf.BoolValue. -const ( - BoolValue_Value = 1 // optional bool -) - -// Field numbers for google.protobuf.StringValue. -const ( - StringValue_Value = 1 // optional string -) - -// Field numbers for google.protobuf.BytesValue. 
-const ( - BytesValue_Value = 1 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index 462d384e9..d02d770c9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -126,24 +126,24 @@ func (db *Builder) unmarshalCounts(b []byte, isFile bool) { b = b[m:] if isFile { switch num { - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: db.NumExtensions++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: db.NumServices++ } } else { switch num { - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: db.NumExtensions++ } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 2540befd6..9385126fb 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -302,13 +303,13 @@ func (fd *Field) MapKey() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(1) + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } func (fd *Field) MapValue() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(2) + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } func (fd *Field) HasDefault() bool { return fd.L1.Default.has } func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index c0cddf86a..66e1fee52 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -8,7 +8,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -107,7 
+107,7 @@ func (fd *File) unmarshalSeed(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Syntax: + case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": fd.L1.Syntax = pref.Proto2 @@ -116,36 +116,36 @@ func (fd *File) unmarshalSeed(b []byte) { default: panic("invalid syntax") } - case fieldnum.FileDescriptorProto_Name: + case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) - case fieldnum.FileDescriptorProto_Package: + case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = pref.FullName(sb.MakeString(v)) - case fieldnum.FileDescriptorProto_EnumType: - if prevField != fieldnum.FileDescriptorProto_EnumType { + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.FileDescriptorProto_MessageType: - if prevField != fieldnum.FileDescriptorProto_MessageType { + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.FileDescriptorProto_Extension: - if prevField != fieldnum.FileDescriptorProto_Extension { + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.FileDescriptorProto_Service: - if prevField != fieldnum.FileDescriptorProto_Service { + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { if numServices > 0 { panic("non-contiguous repeated field") } @@ -233,9 +233,9 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Name: + case genid.EnumDescriptorProto_Name_field_number: ed.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: numValues++ } default: @@ -260,7 +260,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) i++ } @@ -288,33 +288,33 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Name: + case genid.DescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.DescriptorProto_EnumType: - if prevField != fieldnum.DescriptorProto_EnumType { + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.DescriptorProto_NestedType: - if prevField != fieldnum.DescriptorProto_NestedType { + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { if 
numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.DescriptorProto_Extension: - if prevField != fieldnum.DescriptorProto_Extension { + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalSeedOptions(v) } prevField = num @@ -375,9 +375,9 @@ func (md *Message) unmarshalSeedOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -400,20 +400,20 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: xd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: xd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: xd.L1.Kind = pref.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: xd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_Extendee: + case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) } default: @@ -436,7 +436,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Name: + case genid.ServiceDescriptorProto_Name_field_number: sd.L0.FullName = appendFullName(sb, pd.FullName(), v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index bc215944a..e672233e7 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -143,35 +143,35 @@ func (fd *File) unmarshalFull(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_PublicDependency: + case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case fieldnum.FileDescriptorProto_WeakDependency: + case genid.FileDescriptorProto_WeakDependency_field_number: fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Dependency: + case 
genid.FileDescriptorProto_Dependency_field_number: path := sb.MakeString(v) imp, _ := fd.builder.FileRegistry.FindFileByPath(path) if imp == nil { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) serviceIdx++ - case fieldnum.FileDescriptorProto_Options: + case genid.FileDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -196,13 +196,13 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) - case fieldnum.EnumDescriptorProto_ReservedName: + case genid.EnumDescriptorProto_ReservedName_field_number: ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.EnumDescriptorProto_ReservedRange: + case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) - case fieldnum.EnumDescriptorProto_Options: + case genid.EnumDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -228,9 +228,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_EnumReservedRange_Start: + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: r[0] = pref.EnumNumber(v) - case fieldnum.EnumDescriptorProto_EnumReservedRange_End: + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: r[1] = pref.EnumNumber(v) } default: @@ -255,17 +255,17 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Number: + case genid.EnumValueDescriptorProto_Number_field_number: vd.L1.Number = pref.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Name: + case genid.EnumValueDescriptorProto_Name_field_number: // NOTE: Enum values are in the same scope as the enum parent. 
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) - case fieldnum.EnumValueDescriptorProto_Options: + case genid.EnumValueDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -289,29 +289,29 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Field: + case genid.DescriptorProto_Field_field_number: rawFields = append(rawFields, v) - case fieldnum.DescriptorProto_OneofDecl: + case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) - case fieldnum.DescriptorProto_ReservedName: + case genid.DescriptorProto_ReservedName_field_number: md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.DescriptorProto_ReservedRange: + case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) - case fieldnum.DescriptorProto_ExtensionRange: + case genid.DescriptorProto_ExtensionRange_field_number: r, rawOptions := unmarshalMessageExtensionRange(v) opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -347,9 +347,9 @@ func (md *Message) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -368,9 +368,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ReservedRange_Start: + case genid.DescriptorProto_ReservedRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ReservedRange_End: + case genid.DescriptorProto_ReservedRange_End_field_number: r[1] = pref.FieldNumber(v) } default: @@ -390,16 +390,16 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Start: + case genid.DescriptorProto_ExtensionRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ExtensionRange_End: + case genid.DescriptorProto_ExtensionRange_End_field_number: r[1] = pref.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Options: + case 
genid.DescriptorProto_ExtensionRange_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -425,13 +425,13 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: fd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: fd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: fd.L1.Kind = pref.Kind(v) - case fieldnum.FieldDescriptorProto_OneofIndex: + case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either // of them. This ensures pointers to slice elements are stable. @@ -441,22 +441,22 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des panic("oneof type already set") } fd.L1.ContainingOneof = od - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: fd.L1.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_JsonName: + case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: fd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -488,10 +488,10 @@ func (fd *Field) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: fd.L1.HasPacked = true fd.L1.IsPacked = protowire.DecodeBool(v) - case fieldnum.FieldOptions_Weak: + case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.HasEnforceUTF8 = true @@ -518,9 +518,9 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.OneofDescriptorProto_Name: + case genid.OneofDescriptorProto_Name_field_number: od.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.OneofDescriptorProto_Options: + case genid.OneofDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -543,20 +543,20 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: xd.L2.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_JsonName: + case 
genid.FieldDescriptorProto_JsonName_field_number: xd.L2.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -586,7 +586,7 @@ func (xd *Extension) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } default: @@ -608,9 +608,9 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Method: + case genid.ServiceDescriptorProto_Method_field_number: rawMethods = append(rawMethods, v) - case fieldnum.ServiceDescriptorProto_Options: + case genid.ServiceDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -641,22 +641,22 @@ func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.De v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_ClientStreaming: + case genid.MethodDescriptorProto_ClientStreaming_field_number: md.L1.IsStreamingClient = protowire.DecodeBool(v) - case fieldnum.MethodDescriptorProto_ServerStreaming: + case genid.MethodDescriptorProto_ServerStreaming_field_number: md.L1.IsStreamingServer = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_Name: + case genid.MethodDescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.MethodDescriptorProto_InputType: + case genid.MethodDescriptorProto_InputType_field_number: md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_OutputType: + case genid.MethodDescriptorProto_OutputType_field_number: md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_Options: + case genid.MethodDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index 1b7089b64..c876cd34d 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,7 +6,6 @@ package filedesc import ( "fmt" - "math" "sort" "sync" @@ -185,10 +184,7 @@ func (p *FieldRanges) CheckValid(isMessageSet bool) error { // Unlike the FieldNumber.IsValid method, it allows ranges that cover the // reserved number range. func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { - if isMessageSet { - return protowire.MinValidNumber <= n && n <= math.MaxInt32 - } - return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) } // CheckOverlap reports an error if p and q overlap. 
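The hunks above and the generated files that follow apply one mechanical pattern: call sites that previously switched on bare integer constants from the internal fieldnum package now switch on typed constants from the new internal genid package (protoreflect.FieldNumber for field numbers, plus protoreflect.Name/FullName for message and field names). As an illustration only, here is a minimal, self-contained Go sketch of that pattern; it is not part of the patch, and the local constant stands in for genid.FileDescriptorProto_MessageType_field_number because genid is internal to google.golang.org/protobuf and cannot be imported from outside that module.

// Illustrative sketch (not from the patch): the fieldnum -> genid migration
// pattern, applied to counting message_type entries roughly the way
// Builder.unmarshalCounts does in internal/filedesc/build.go.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// Stand-in for genid.FileDescriptorProto_MessageType_field_number; the real
// constant lives in the internal genid package added by this patch.
const fileDescriptorProtoMessageType protoreflect.FieldNumber = 4

// countMessages walks a serialized FileDescriptorProto and counts its
// top-level message_type fields. Well-formed input is assumed; the real
// code also recurses into the nested bytes.
func countMessages(b []byte) (n int) {
	for len(b) > 0 {
		num, typ, tagLen := protowire.ConsumeTag(b)
		b = b[tagLen:]
		// Old: case fieldnum.FileDescriptorProto_MessageType (untyped int)
		// New: case genid.FileDescriptorProto_MessageType_field_number
		if typ == protowire.BytesType && num == fileDescriptorProtoMessageType {
			n++
		}
		b = b[protowire.ConsumeFieldValue(num, typ, b):]
	}
	return n
}

func main() {
	// Encode a FileDescriptorProto carrying two empty message_type entries.
	var b []byte
	for i := 0; i < 2; i++ {
		b = protowire.AppendTag(b, fileDescriptorProtoMessageType, protowire.BytesType)
		b = protowire.AppendBytes(b, nil)
	}
	fmt.Println(countMessages(b)) // prints 2
}

Beyond field numbers, genid also carries message and field names (for example Any_message_fullname, MapEntry_Key_field_name), which is what lets the prototext encoder in the earlier hunks drop its hard-coded "google.protobuf.Any", "key", and "value" strings.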
diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. 
+const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..e3cdf1c20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. 
+const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. 
+const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+ FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions.
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..1a38944b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..3bc710138 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go deleted file mode 100644 index f45509fbd..000000000 --- a/vendor/google.golang.org/protobuf/internal/genname/name.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package genname contains constants for generated names. -package genname - -const ( - State = "state" - - SizeCache = "sizeCache" - SizeCacheA = "XXX_sizecache" - - WeakFields = "weakFields" - WeakFieldsA = "XXX_weak" - - UnknownFields = "unknownFields" - UnknownFieldsA = "XXX_unrecognized" - - ExtensionFields = "extensionFields" - ExtensionFieldsA = "XXX_InternalExtensions" - ExtensionFieldsB = "XXX_extensions" - - WeakFieldPrefix = "XXX_weak_" -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 4d22c9604..b5974528d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -10,6 +10,7 @@ import ( "strconv" "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -19,6 +20,12 @@ import ( // functions that we do not want to appear in godoc. type Export struct{} +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. 
type enum = interface{} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 35a67c25b..44885a761 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,6 +10,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -134,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo b = b[n:] err := errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) @@ -143,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } key = v n = o.n - case 2: + case genid.MapEntry_Value_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 7dd994bd9..c026a9818 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -12,7 +12,7 @@ import ( "sync" "sync/atomic" - "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -148,19 +148,19 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { fieldLoop: for i := 0; i < t.NumField(); i++ { switch f := t.Field(i); f.Name { - case genname.SizeCache, genname.SizeCacheA: + case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) } - case genname.WeakFields, genname.WeakFieldsA: + case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) } - case genname.UnknownFields, genname.UnknownFieldsA: + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsType { si.unknownOffset = offsetOf(f, mi.Exporter) } - case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB: + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 57de9cc85..08cfb6054 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -14,6 +14,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -282,9 +283,9 @@ State: switch { case st.typ == validationTypeMap: switch num { - case 1: + case genid.MapEntry_Key_field_number: vi.typ = st.keyType - case 2: + case genid.MapEntry_Value_field_number: vi.typ = st.valType vi.mi = st.mi vi.requiredBit = 1 diff 
--git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 6b3001c66..72cf770b4 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 24 + Minor = 25 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 4974b16d5..42fc5195e 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" @@ -220,13 +221,13 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto b = b[n:] err = errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: key, n, err = o.unmarshalScalar(b, wtyp, keyField) if err != nil { break } haveKey = true - case 2: + case genid.MapEntry_Value_field_number: var v protoreflect.Value v, n, err = o.unmarshalScalar(b, wtyp, valField) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index b669a4e76..dd85915bd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -128,7 +128,6 @@ package protoreflect import ( "fmt" - "regexp" "strings" "google.golang.org/protobuf/encoding/protowire" @@ -408,19 +407,14 @@ type EnumRanges interface { doNotImplement } -var ( - regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`) - regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`) -) - // Name is the short name for a proto declaration. This is not the name // as used in Go source code, which might not be identical to the proto name. type Name string // e.g., "Kind" -// IsValid reports whether n is a syntactically valid name. +// IsValid reports whether s is a syntactically valid name. // An empty name is invalid. -func (n Name) IsValid() bool { - return regexName.MatchString(string(n)) +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) } // Names represent a list of names. @@ -443,10 +437,42 @@ type Names interface { // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" -// IsValid reports whether n is a syntactically valid full name. +// IsValid reports whether s is a syntactically valid full name. // An empty full name is invalid. -func (n FullName) IsValid() bool { - return regexFullName.MatchString(string(n)) +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' 
{ + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') } // Name returns the short name, which is the last identifier segment. diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 5f9498e4e..82a473e26 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -31,12 +31,100 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... 
// make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// package anypb import ( + proto "google.golang.org/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" + strings "strings" sync "sync" ) @@ -158,6 +246,125 @@ type Any struct { Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. +func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. 
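For orientation, the new anypb helpers fit together as in the short sketch below: New packs a message and records its type URL, MessageIs/UnmarshalTo unpack into a known concrete type, and UnmarshalNew resolves the type through the global registry. This is an illustrative sketch only, not code vendored by this change; the choice of *durationpb.Duration as the payload is arbitrary.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack: anypb.New marshals src and records its type URL.
	src := durationpb.New(90 * time.Second)
	packed, err := anypb.New(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(packed.MessageName()) // google.protobuf.Duration

	// Unpack into a known concrete type.
	dst := new(durationpb.Duration)
	if packed.MessageIs(dst) {
		if err := packed.UnmarshalTo(dst); err != nil {
			log.Fatal(err)
		}
		fmt.Println(dst.AsDuration()) // 1m30s
	}

	// Unpack via the global type registry when the concrete type is unknown;
	// this works because importing durationpb registers its types.
	m, err := packed.UnmarshalNew()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", m) // *durationpb.Duration
}
```

MessageIs and MessageName only inspect the type URL, so type checks do not require unmarshaling the payload.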
+func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. +func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + func (x *Any) Reset() { *x = Any{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 3997c604f..f7a110994 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -31,13 +31,58 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). 
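The saturation and sign rules described above are easiest to see on hand-built values. A minimal sketch, not part of the vendored file; the numbers are illustrative only:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A well-formed Duration: 1.5 seconds.
	ok := &durationpb.Duration{Seconds: 1, Nanos: 500000000}
	fmt.Println(ok.AsDuration(), ok.CheckValid()) // 1.5s <nil>

	// Seconds and nanos with opposite signs violate duration.proto.
	// AsDuration still normalizes the value on a best-effort basis,
	// while CheckValid reports the problem.
	mixed := &durationpb.Duration{Seconds: 1, Nanos: -500000000}
	fmt.Println(mixed.AsDuration()) // 500ms
	fmt.Println(mixed.CheckValid()) // proto: duration (...) has seconds and nanos with different signs
}
```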
+// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// package durationpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" reflect "reflect" sync "sync" + time "time" ) // A Duration represents a signed, fixed-length span of time represented @@ -118,6 +163,91 @@ type Duration struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + func (x *Duration) Reset() { *x = Duration{} if protoimpl.UnsafeEnabled { diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 6fe6d42f1..c25e4bd7d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -31,6 +31,48 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... 
// make use of ts as a *timestamppb.Timestamp +// package timestamppb import ( @@ -38,6 +80,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + time "time" ) // A Timestamp represents a point in time independent of any time zone or local @@ -140,6 +183,73 @@ type Timestamp struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. +func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + func (x *Timestamp) Reset() { *x = Timestamp{} if protoimpl.UnsafeEnabled { diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml new file mode 100644 index 000000000..65dcbc56d --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.8 + - 1.7 + - 1.6 \ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 000000000..c3d4cc307 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 
@@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md new file mode 100644 index 000000000..060eae52a --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md @@ -0,0 +1,179 @@ +# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) + +### Lumberjack is a Go package for writing logs to rolling files. + +Package lumberjack provides a rolling logger. + +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +thusly: + + import "gopkg.in/natefinch/lumberjack.v2" + +The package name remains simply lumberjack, and the code resides at +https://github.com/natefinch/lumberjack under the v2.0 branch. + +Lumberjack is intended to be one part of a logging infrastructure. +It is not an all-in-one solution, but instead is a pluggable +component at the bottom of the logging stack that simply controls the files +to which logs are written. + +Lumberjack plays well with any logging package that can write to an +io.Writer, including the standard library's log package. + +Lumberjack assumes that only one process is writing to the output files. +Using the same lumberjack configuration from multiple processes on the same +machine will result in improper behavior. + + +**Example** + +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. + +Code: + +```go +log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, // disabled by default +}) +``` + + + +## type Logger +``` go +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. 
+ Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress" yaml:"compress"` + // contains filtered or unexported fields +} +``` +Logger is an io.WriteCloser that writes to the specified filename. + +Logger opens or creates the logfile on first Write. If the file exists and +is less than MaxSize megabytes, lumberjack will open and append to that file. +If the file exists and its size is >= MaxSize megabytes, the file is renamed +by putting the current time in a timestamp in the name immediately before the +file's extension (or the end of the filename if there's no extension). A new +log file is then created using original filename. + +Whenever a write would cause the current log file exceed MaxSize megabytes, +the current file is closed, renamed, and a new log file created with the +original name. Thus, the filename you give Logger is always the "current" log +file. + +Backups use the log file name given to Logger, in the form `name-timestamp.ext` +where name is the filename without the extension, timestamp is the time at which +the log was rotated formatted with the time.Time format of +`2006-01-02T15-04-05.000` and the extension is the original extension. For +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created +at 6:30pm on Nov 11 2016 would use the filename +`/var/log/foo/server-2016-11-04T18-30-00.000.log` + +### Cleaning Up Old Log Files +Whenever a new logfile gets created, old log files may be deleted. The most +recent files according to the encoded timestamp will be retained, up to a +number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +with an encoded timestamp older than MaxAge days are deleted, regardless of +MaxBackups. Note that the time encoded in the timestamp is the rotation +time, which may differ from the last time that file was written to. + +If MaxBackups and MaxAge are both 0, no old log files will be deleted. + + + + + + + + + + + +### func (\*Logger) Close +``` go +func (l *Logger) Close() error +``` +Close implements io.Closer, and closes the current logfile. + + + +### func (\*Logger) Rotate +``` go +func (l *Logger) Rotate() error +``` +Rotate causes Logger to close the existing log file and immediately create a +new one. This is a helper function for applications that want to initiate +rotations outside of the normal rotation rules, such as in response to +SIGHUP. 
After rotating, this initiates a cleanup of old log files according +to the normal rules. + +**Example** + +Example of how to rotate in response to SIGHUP. + +Code: + +```go +l := &lumberjack.Logger{} +log.SetOutput(l) +c := make(chan os.Signal, 1) +signal.Notify(c, syscall.SIGHUP) + +go func() { + for { + <-c + l.Rotate() + } +}() +``` + +### func (\*Logger) Write +``` go +func (l *Logger) Write(p []byte) (n int, err error) +``` +Write implements io.Writer. If a write would cause the log file to be larger +than MaxSize, the file is closed, renamed to include a timestamp of the +current time, and a new log file is created using the original log file name. +If the length of the write is greater than MaxSize, an error is returned. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go new file mode 100644 index 000000000..11d066972 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go @@ -0,0 +1,11 @@ +// +build !linux + +package lumberjack + +import ( + "os" +) + +func chown(_ string, _ os.FileInfo) error { + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go new file mode 100644 index 000000000..2758ec9ce --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go @@ -0,0 +1,19 @@ +package lumberjack + +import ( + "os" + "syscall" +) + +// os_Chown is a var so we can mock it out during tests. +var os_Chown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return os_Chown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go new file mode 100644 index 000000000..46d97c553 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -0,0 +1,541 @@ +// Package lumberjack provides a rolling logger. +// +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +// thusly: +// +// import "gopkg.in/natefinch/lumberjack.v2" +// +// The package name remains simply lumberjack, and the code resides at +// https://github.com/natefinch/lumberjack under the v2.0 branch. +// +// Lumberjack is intended to be one part of a logging infrastructure. +// It is not an all-in-one solution, but instead is a pluggable +// component at the bottom of the logging stack that simply controls the files +// to which logs are written. +// +// Lumberjack plays well with any logging package that can write to an +// io.Writer, including the standard library's log package. +// +// Lumberjack assumes that only one process is writing to the output files. +// Using the same lumberjack configuration from multiple processes on the same +// machine will result in improper behavior. +package lumberjack + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + backupTimeFormat = "2006-01-02T15-04-05.000" + compressSuffix = ".gz" + defaultMaxSize = 100 +) + +// ensure we always implement io.WriteCloser +var _ io.WriteCloser = (*Logger)(nil) + +// Logger is an io.WriteCloser that writes to the specified filename. 
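For orientation, the two README examples above combine into one small program: lumberjack as the writer behind the standard library logger, with rotation on SIGHUP. This is an illustrative sketch, not code used by this repository; the path and retention limits are assumed values.

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	roller := &lumberjack.Logger{
		Filename:   "./logs/app.log", // assumed path
		MaxSize:    100,              // megabytes before rollover
		MaxBackups: 3,                // keep at most 3 rotated files...
		MaxAge:     28,               // ...and none older than 28 days
		Compress:   true,             // gzip rotated files
	}
	defer roller.Close()
	log.SetOutput(roller)

	// Rotate on SIGHUP, as in the README example above.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			if err := roller.Rotate(); err != nil {
				log.Printf("rotate failed: %v", err)
			}
		}
	}()

	log.Println("logging through lumberjack") // a real program keeps running here
}
```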
+// +// Logger opens or creates the logfile on first Write. If the file exists and +// is less than MaxSize megabytes, lumberjack will open and append to that file. +// If the file exists and its size is >= MaxSize megabytes, the file is renamed +// by putting the current time in a timestamp in the name immediately before the +// file's extension (or the end of the filename if there's no extension). A new +// log file is then created using original filename. +// +// Whenever a write would cause the current log file exceed MaxSize megabytes, +// the current file is closed, renamed, and a new log file created with the +// original name. Thus, the filename you give Logger is always the "current" log +// file. +// +// Backups use the log file name given to Logger, in the form +// `name-timestamp.ext` where name is the filename without the extension, +// timestamp is the time at which the log was rotated formatted with the +// time.Time format of `2006-01-02T15-04-05.000` and the extension is the +// original extension. For example, if your Logger.Filename is +// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would +// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log` +// +// Cleaning Up Old Log Files +// +// Whenever a new logfile gets created, old log files may be deleted. The most +// recent files according to the encoded timestamp will be retained, up to a +// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +// with an encoded timestamp older than MaxAge days are deleted, regardless of +// MaxBackups. Note that the time encoded in the timestamp is the rotation +// time, which may differ from the last time that file was written to. +// +// If MaxBackups and MaxAge are both 0, no old log files will be deleted. +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress" yaml:"compress"` + + size int64 + file *os.File + mu sync.Mutex + + millCh chan bool + startMill sync.Once +} + +var ( + // currentTime exists so it can be mocked out by tests. + currentTime = time.Now + + // os_Stat exists so it can be mocked out by tests. + os_Stat = os.Stat + + // megabyte is the conversion factor between MaxSize and bytes. 
It is a + // variable so tests can mock it out and not need to write megabytes of data + // to disk. + megabyte = 1024 * 1024 +) + +// Write implements io.Writer. If a write would cause the log file to be larger +// than MaxSize, the file is closed, renamed to include a timestamp of the +// current time, and a new log file is created using the original log file name. +// If the length of the write is greater than MaxSize, an error is returned. +func (l *Logger) Write(p []byte) (n int, err error) { + l.mu.Lock() + defer l.mu.Unlock() + + writeLen := int64(len(p)) + if writeLen > l.max() { + return 0, fmt.Errorf( + "write length %d exceeds maximum file size %d", writeLen, l.max(), + ) + } + + if l.file == nil { + if err = l.openExistingOrNew(len(p)); err != nil { + return 0, err + } + } + + if l.size+writeLen > l.max() { + if err := l.rotate(); err != nil { + return 0, err + } + } + + n, err = l.file.Write(p) + l.size += int64(n) + + return n, err +} + +// Close implements io.Closer, and closes the current logfile. +func (l *Logger) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.close() +} + +// close closes the file if it is open. +func (l *Logger) close() error { + if l.file == nil { + return nil + } + err := l.file.Close() + l.file = nil + return err +} + +// Rotate causes Logger to close the existing log file and immediately create a +// new one. This is a helper function for applications that want to initiate +// rotations outside of the normal rotation rules, such as in response to +// SIGHUP. After rotating, this initiates compression and removal of old log +// files according to the configuration. +func (l *Logger) Rotate() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.rotate() +} + +// rotate closes the current file, moves it aside with a timestamp in the name, +// (if it exists), opens a new file with the original filename, and then runs +// post-rotation processing and removal. +func (l *Logger) rotate() error { + if err := l.close(); err != nil { + return err + } + if err := l.openNew(); err != nil { + return err + } + l.mill() + return nil +} + +// openNew opens a new log file for writing, moving any old log file out of the +// way. This methods assumes the file has already been closed. +func (l *Logger) openNew() error { + err := os.MkdirAll(l.dir(), 0744) + if err != nil { + return fmt.Errorf("can't make directories for new logfile: %s", err) + } + + name := l.filename() + mode := os.FileMode(0644) + info, err := os_Stat(name) + if err == nil { + // Copy the mode off the old logfile. + mode = info.Mode() + // move the existing file + newname := backupName(name, l.LocalTime) + if err := os.Rename(name, newname); err != nil { + return fmt.Errorf("can't rename log file: %s", err) + } + + // this is a no-op anywhere but linux + if err := chown(name, info); err != nil { + return err + } + } + + // we use truncate here because this should only get called when we've moved + // the file ourselves. if someone else creates the file in the meantime, + // just wipe out the contents. + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) + if err != nil { + return fmt.Errorf("can't open new logfile: %s", err) + } + l.file = f + l.size = 0 + return nil +} + +// backupName creates a new filename from the given name, inserting a timestamp +// between the filename and the extension, using the local time if requested +// (otherwise UTC). 
+func backupName(name string, local bool) string { + dir := filepath.Dir(name) + filename := filepath.Base(name) + ext := filepath.Ext(filename) + prefix := filename[:len(filename)-len(ext)] + t := currentTime() + if !local { + t = t.UTC() + } + + timestamp := t.Format(backupTimeFormat) + return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) +} + +// openExistingOrNew opens the logfile if it exists and if the current write +// would not put it over MaxSize. If there is no such file or the write would +// put it over the MaxSize, a new file is created. +func (l *Logger) openExistingOrNew(writeLen int) error { + l.mill() + + filename := l.filename() + info, err := os_Stat(filename) + if os.IsNotExist(err) { + return l.openNew() + } + if err != nil { + return fmt.Errorf("error getting log file info: %s", err) + } + + if info.Size()+int64(writeLen) >= l.max() { + return l.rotate() + } + + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + // if we fail to open the old log file for some reason, just ignore + // it and open a new log file. + return l.openNew() + } + l.file = file + l.size = info.Size() + return nil +} + +// genFilename generates the name of the logfile from the current time. +func (l *Logger) filename() string { + if l.Filename != "" { + return l.Filename + } + name := filepath.Base(os.Args[0]) + "-lumberjack.log" + return filepath.Join(os.TempDir(), name) +} + +// millRunOnce performs compression and removal of stale log files. +// Log files are compressed if enabled via configuration and old log +// files are removed, keeping at most l.MaxBackups files, as long as +// none of them are older than MaxAge. +func (l *Logger) millRunOnce() error { + if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress { + return nil + } + + files, err := l.oldLogFiles() + if err != nil { + return err + } + + var compress, remove []logInfo + + if l.MaxBackups > 0 && l.MaxBackups < len(files) { + preserved := make(map[string]bool) + var remaining []logInfo + for _, f := range files { + // Only count the uncompressed log file or the + // compressed log file, not both. + fn := f.Name() + if strings.HasSuffix(fn, compressSuffix) { + fn = fn[:len(fn)-len(compressSuffix)] + } + preserved[fn] = true + + if len(preserved) > l.MaxBackups { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + if l.MaxAge > 0 { + diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) + cutoff := currentTime().Add(-1 * diff) + + var remaining []logInfo + for _, f := range files { + if f.timestamp.Before(cutoff) { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + + if l.Compress { + for _, f := range files { + if !strings.HasSuffix(f.Name(), compressSuffix) { + compress = append(compress, f) + } + } + } + + for _, f := range remove { + errRemove := os.Remove(filepath.Join(l.dir(), f.Name())) + if err == nil && errRemove != nil { + err = errRemove + } + } + for _, f := range compress { + fn := filepath.Join(l.dir(), f.Name()) + errCompress := compressLogFile(fn, fn+compressSuffix) + if err == nil && errCompress != nil { + err = errCompress + } + } + + return err +} + +// millRun runs in a goroutine to manage post-rotation compression and removal +// of old log files. +func (l *Logger) millRun() { + for _ = range l.millCh { + // what am I going to do, log this? 
+ _ = l.millRunOnce() + } +} + +// mill performs post-rotation compression and removal of stale log files, +// starting the mill goroutine if necessary. +func (l *Logger) mill() { + l.startMill.Do(func() { + l.millCh = make(chan bool, 1) + go l.millRun() + }) + select { + case l.millCh <- true: + default: + } +} + +// oldLogFiles returns the list of backup log files stored in the same +// directory as the current log file, sorted by ModTime +func (l *Logger) oldLogFiles() ([]logInfo, error) { + files, err := ioutil.ReadDir(l.dir()) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + logFiles := []logInfo{} + + prefix, ext := l.prefixAndExt() + + for _, f := range files { + if f.IsDir() { + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + // error parsing means that the suffix at the end was not generated + // by lumberjack, and therefore it's not a backup file. + } + + sort.Sort(byFormatTime(logFiles)) + + return logFiles, nil +} + +// timeFromName extracts the formatted time from the filename by stripping off +// the filename's prefix and extension. This prevents someone's filename from +// confusing time.parse. +func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) { + if !strings.HasPrefix(filename, prefix) { + return time.Time{}, errors.New("mismatched prefix") + } + if !strings.HasSuffix(filename, ext) { + return time.Time{}, errors.New("mismatched extension") + } + ts := filename[len(prefix) : len(filename)-len(ext)] + return time.Parse(backupTimeFormat, ts) +} + +// max returns the maximum size in bytes of log files before rolling. +func (l *Logger) max() int64 { + if l.MaxSize == 0 { + return int64(defaultMaxSize * megabyte) + } + return int64(l.MaxSize) * int64(megabyte) +} + +// dir returns the directory for the current filename. +func (l *Logger) dir() string { + return filepath.Dir(l.filename()) +} + +// prefixAndExt returns the filename part and extension part from the Logger's +// filename. +func (l *Logger) prefixAndExt() (prefix, ext string) { + filename := filepath.Base(l.filename()) + ext = filepath.Ext(filename) + prefix = filename[:len(filename)-len(ext)] + "-" + return prefix, ext +} + +// compressLogFile compresses the given log file, removing the +// uncompressed log file if successful. +func compressLogFile(src, dst string) (err error) { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("failed to open log file: %v", err) + } + defer f.Close() + + fi, err := os_Stat(src) + if err != nil { + return fmt.Errorf("failed to stat log file: %v", err) + } + + if err := chown(dst, fi); err != nil { + return fmt.Errorf("failed to chown compressed log file: %v", err) + } + + // If this file already exists, we presume it was created by + // a previous attempt to compress the log file. 
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go new file mode 100644 index 000000000..cbc6bb59d --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false + +// +groupName=admission.k8s.io + +package v1 // import "k8s.io/api/admission/v1" diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go new file mode 100644 index 000000000..04eb20675 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.pb.go @@ -0,0 +1,1792 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto", fileDescriptor_4b73421fd5edef9f) +} + +var 
fileDescriptor_4b73421fd5edef9f = []byte{ + // 919 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0x58, 0xf9, 0xb0, 0x36, 0x39, 0x20, + 0x17, 0xb5, 0xbb, 0x24, 0x82, 0x2a, 0xaa, 0x38, 0x34, 0x4b, 0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda, + 0x40, 0xc5, 0x01, 0x69, 0xec, 0x9d, 0xda, 0x83, 0xed, 0x99, 0x65, 0x67, 0xd6, 0xc1, 0x37, 0x4e, + 0x9c, 0xf9, 0x06, 0x1c, 0xf9, 0x0c, 0x7c, 0x83, 0x1c, 0x7b, 0xec, 0xc9, 0x22, 0xe6, 0x5b, 0xe4, + 0x84, 0x66, 0x76, 0xf6, 0x4f, 0xf3, 0x47, 0x84, 0x96, 0x93, 0xf7, 0xfd, 0xf9, 0xfd, 0xde, 0xf3, + 0xef, 0xed, 0x7b, 0x0b, 0x1e, 0x4f, 0x77, 0x85, 0x47, 0xb9, 0x3f, 0x4d, 0x86, 0x24, 0x66, 0x44, + 0x12, 0xe1, 0x2f, 0x08, 0x0b, 0x79, 0xec, 0x9b, 0x00, 0x8e, 0xa8, 0x8f, 0xc3, 0x39, 0x15, 0x82, + 0x72, 0xe6, 0x2f, 0xb6, 0xfd, 0x31, 0x61, 0x24, 0xc6, 0x92, 0x84, 0x5e, 0x14, 0x73, 0xc9, 0xe1, + 0x87, 0x69, 0xa2, 0x87, 0x23, 0xea, 0xe5, 0x89, 0xde, 0x62, 0xbb, 0xfb, 0x60, 0x4c, 0xe5, 0x24, + 0x19, 0x7a, 0x23, 0x3e, 0xf7, 0xc7, 0x7c, 0xcc, 0x7d, 0x9d, 0x3f, 0x4c, 0x5e, 0x6a, 0x4b, 0x1b, + 0xfa, 0x29, 0xe5, 0xe9, 0xde, 0x2f, 0x17, 0x4c, 0xe4, 0x84, 0x30, 0x49, 0x47, 0x58, 0x5e, 0x5d, + 0xb5, 0xfb, 0x59, 0x91, 0x3d, 0xc7, 0xa3, 0x09, 0x65, 0x24, 0x5e, 0xfa, 0xd1, 0x74, 0xac, 0x1c, + 0xc2, 0x9f, 0x13, 0x89, 0xaf, 0x42, 0xf9, 0xd7, 0xa1, 0xe2, 0x84, 0x49, 0x3a, 0x27, 0x97, 0x00, + 0x0f, 0xff, 0x0d, 0x20, 0x46, 0x13, 0x32, 0xc7, 0x17, 0x71, 0x5b, 0xbf, 0xdb, 0xa0, 0xb3, 0x97, + 0x89, 0x81, 0xc8, 0x4f, 0x09, 0x11, 0x12, 0x06, 0xa0, 0x9a, 0xd0, 0xd0, 0xb1, 0xfa, 0xd6, 0xc0, + 0x0e, 0x3e, 0x3d, 0x5d, 0xf5, 0x2a, 0xeb, 0x55, 0xaf, 0x7a, 0x7c, 0xb0, 0x7f, 0xbe, 0xea, 0x7d, + 0x74, 0x5d, 0x21, 0xb9, 0x8c, 0x88, 0xf0, 0x8e, 0x0f, 0xf6, 0x91, 0x02, 0xc3, 0x17, 0xa0, 0x36, + 0xa5, 0x2c, 0x74, 0x6e, 0xf5, 0xad, 0x41, 0x6b, 0xe7, 0xa1, 0x57, 0x88, 0x9f, 0xc3, 0xbc, 0x68, + 0x3a, 0x56, 0x0e, 0xe1, 0x29, 0x19, 0xbc, 0xc5, 0xb6, 0xf7, 0x55, 0xcc, 0x93, 0xe8, 0x5b, 0x12, + 0xab, 0x66, 0xbe, 0xa1, 0x2c, 0x0c, 0x36, 0x4d, 0xf1, 0x9a, 0xb2, 0x90, 0x66, 0x84, 0x13, 0xd0, + 0x8c, 0x89, 0xe0, 0x49, 0x3c, 0x22, 0x4e, 0x55, 0xb3, 0x3f, 0xfa, 0xef, 0xec, 0xc8, 0x30, 0x04, + 0x1d, 0x53, 0xa1, 0x99, 0x79, 0x50, 0xce, 0x0e, 0x3f, 0x07, 0x2d, 0x91, 0x0c, 0xb3, 0x80, 0x53, + 0xd3, 0x7a, 0xdc, 0x35, 0x80, 0xd6, 0xb3, 0x22, 0x84, 0xca, 0x79, 0x90, 0x82, 0x56, 0x9c, 0x2a, + 0xa9, 0xba, 0x76, 0xde, 0x7b, 0x27, 0x05, 0xda, 0xaa, 0x14, 0x2a, 0xe8, 0x50, 0x99, 0x1b, 0x2e, + 0x41, 0xdb, 0x98, 0x79, 0x97, 0xb7, 0xdf, 0x59, 0x92, 0xbb, 0xeb, 0x55, 0xaf, 0x8d, 0xde, 0xa4, + 0x45, 0x17, 0xeb, 0xc0, 0xaf, 0x01, 0x34, 0xae, 0x92, 0x10, 0x4e, 0x5b, 0x6b, 0xd4, 0x35, 0x1a, + 0x41, 0x74, 0x29, 0x03, 0x5d, 0x81, 0x82, 0x7d, 0x50, 0x63, 0x78, 0x4e, 0x9c, 0x0d, 0x8d, 0xce, + 0x87, 0xfe, 0x14, 0xcf, 0x09, 0xd2, 0x11, 0xe8, 0x03, 0x5b, 0xfd, 0x8a, 0x08, 0x8f, 0x88, 0x53, + 0xd7, 0x69, 0x77, 0x4c, 0x9a, 0xfd, 0x34, 0x0b, 0xa0, 0x22, 0x07, 0x7e, 0x01, 0x6c, 0x1e, 0xa9, + 0x57, 0x9d, 0x72, 0xe6, 0x34, 0x34, 0xc0, 0xcd, 0x00, 0x87, 0x59, 0xe0, 0xbc, 0x6c, 0xa0, 0x02, + 0x00, 0x9f, 0x83, 0x66, 0x22, 0x48, 0x7c, 0xc0, 0x5e, 0x72, 0xa7, 0xa9, 0x05, 0xfd, 0xd8, 0x2b, + 0x9f, 0x8f, 0x37, 0xd6, 0x5e, 0x09, 0x79, 0x6c, 0xb2, 0x8b, 0xf7, 0x29, 0xf3, 0xa0, 0x9c, 0x09, + 0x1e, 0x83, 0x3a, 0x1f, 0xfe, 0x48, 0x46, 0xd2, 0xb1, 0x35, 0xe7, 0x83, 0x6b, 0x87, 0x64, 0xb6, + 0xd6, 0x43, 0xf8, 0xe4, 0xc9, 0xcf, 0x92, 0x30, 0x35, 0x9f, 0xe0, 0xb6, 0xa1, 0xae, 0x1f, 0x6a, + 0x12, 0x64, 0xc8, 0xe0, 0x0f, 
0xc0, 0xe6, 0xb3, 0x30, 0x75, 0x3a, 0xe0, 0x6d, 0x98, 0x73, 0x29, + 0x0f, 0x33, 0x1e, 0x54, 0x50, 0xc2, 0x2d, 0x50, 0x0f, 0xe3, 0x25, 0x4a, 0x98, 0xd3, 0xea, 0x5b, + 0x83, 0x66, 0x00, 0x54, 0x0f, 0xfb, 0xda, 0x83, 0x4c, 0x04, 0xbe, 0x00, 0x0d, 0x1e, 0x29, 0x31, + 0x84, 0xb3, 0xf9, 0x36, 0x1d, 0xb4, 0x4d, 0x07, 0x8d, 0xc3, 0x94, 0x05, 0x65, 0x74, 0x5b, 0x7f, + 0xd4, 0xc0, 0x9d, 0xd2, 0x85, 0x12, 0x11, 0x67, 0x82, 0xfc, 0x2f, 0x27, 0xea, 0x1e, 0x68, 0xe0, + 0xd9, 0x8c, 0x9f, 0x90, 0xf4, 0x4a, 0x35, 0x8b, 0x26, 0xf6, 0x52, 0x37, 0xca, 0xe2, 0xf0, 0x08, + 0xd4, 0x85, 0xc4, 0x32, 0x11, 0xe6, 0xe2, 0xdc, 0xbf, 0xd9, 0x7a, 0x3d, 0xd3, 0x98, 0x54, 0x30, + 0x44, 0x44, 0x32, 0x93, 0xc8, 0xf0, 0xc0, 0x1e, 0xd8, 0x88, 0xb0, 0x1c, 0x4d, 0xf4, 0x55, 0xd9, + 0x0c, 0xec, 0xf5, 0xaa, 0xb7, 0x71, 0xa4, 0x1c, 0x28, 0xf5, 0xc3, 0x5d, 0x60, 0xeb, 0x87, 0xe7, + 0xcb, 0x28, 0x5b, 0x8c, 0xae, 0x1a, 0xd1, 0x51, 0xe6, 0x3c, 0x2f, 0x1b, 0xa8, 0x48, 0x86, 0xbf, + 0x5a, 0xa0, 0x83, 0x93, 0x90, 0xca, 0x3d, 0xc6, 0xb8, 0xc4, 0xe9, 0x54, 0xea, 0xfd, 0xea, 0xa0, + 0xb5, 0xf3, 0xd8, 0xbb, 0xe6, 0x23, 0xe8, 0x5d, 0x92, 0xd8, 0xdb, 0xbb, 0x40, 0xf1, 0x84, 0xc9, + 0x78, 0x19, 0x38, 0x46, 0xa3, 0xce, 0xc5, 0x30, 0xba, 0x54, 0x13, 0x0e, 0x40, 0xf3, 0x04, 0xc7, + 0x8c, 0xb2, 0xb1, 0x70, 0x1a, 0xfd, 0xaa, 0x5a, 0x6d, 0xb5, 0x19, 0xdf, 0x19, 0x1f, 0xca, 0xa3, + 0xdd, 0x2f, 0xc1, 0x07, 0x57, 0x96, 0x83, 0x1d, 0x50, 0x9d, 0x92, 0x65, 0x3a, 0x67, 0xa4, 0x1e, + 0xe1, 0xfb, 0x60, 0x63, 0x81, 0x67, 0x09, 0xd1, 0x33, 0xb3, 0x51, 0x6a, 0x3c, 0xba, 0xb5, 0x6b, + 0x6d, 0xfd, 0x69, 0x81, 0x76, 0xe9, 0x6f, 0x2c, 0x28, 0x39, 0x81, 0x47, 0xa0, 0x61, 0xee, 0x8d, + 0xe6, 0x68, 0xed, 0xdc, 0xbb, 0x89, 0x02, 0x1a, 0x10, 0xb4, 0xd4, 0xab, 0x90, 0xdd, 0xc1, 0x8c, + 0x46, 0x9d, 0x86, 0xd8, 0x48, 0x64, 0x3e, 0x6e, 0x9f, 0xdc, 0x5c, 0xd4, 0x54, 0x80, 0xcc, 0x42, + 0x39, 0x53, 0x30, 0x38, 0x3d, 0x73, 0x2b, 0xaf, 0xce, 0xdc, 0xca, 0xeb, 0x33, 0xb7, 0xf2, 0xcb, + 0xda, 0xb5, 0x4e, 0xd7, 0xae, 0xf5, 0x6a, 0xed, 0x5a, 0xaf, 0xd7, 0xae, 0xf5, 0xd7, 0xda, 0xb5, + 0x7e, 0xfb, 0xdb, 0xad, 0x7c, 0x7f, 0x6b, 0xb1, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x62, + 0xcb, 0x64, 0xf1, 0x09, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + 
dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + 
`Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := 
iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto new file mode 100644 index 000000000..5fc0e342e --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package k8s.io.api.admission.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. 
+ // +optional + map auditAnnotations = 6; + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + repeated string warnings = 7; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response. + // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1/register.go b/vendor/k8s.io/api/admission/v1/register.go new file mode 100644 index 000000000..b548509ab --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go new file mode 100644 index 000000000..556fd1ad5 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. 
+ // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. 
+ UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..f81594c91 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -0,0 +1,78 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. 
If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", + "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f7369471a --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(metav1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(metav1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. 
+func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go new file mode 100644 index 000000000..a5669022a --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=admission.k8s.io + +package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go new file mode 100644 index 000000000..ae82ff599 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -0,0 +1,1792 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9) +} + +var fileDescriptor_b87c2352de86eab9 = []byte{ + // 925 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x6e, 0x23, 0x45, + 0x17, 0x76, 0x8f, 0x1d, 0xdb, 0x5d, 0xce, 0x3f, 0xf6, 0xd4, 0xfc, 0x48, 0x2d, 0x0b, 0xb5, 0x4d, + 0x16, 
0xc8, 0x48, 0x93, 0x6a, 0x12, 0xc1, 0x28, 0x1a, 0xb1, 0x49, 0x93, 0x08, 0x05, 0xa4, 0x49, + 0x54, 0x33, 0x86, 0x81, 0x05, 0x52, 0xd9, 0xae, 0xb1, 0x1b, 0xdb, 0x55, 0x4d, 0x57, 0xb5, 0x83, + 0x77, 0xec, 0xd9, 0xf0, 0x06, 0xbc, 0x00, 0x6f, 0xc1, 0x26, 0xcb, 0x59, 0xce, 0xca, 0x22, 0xe6, + 0x2d, 0xb2, 0x42, 0x55, 0x5d, 0x7d, 0x19, 0x27, 0x81, 0xb9, 0xb0, 0x72, 0x9f, 0xcb, 0xf7, 0x9d, + 0xe3, 0xef, 0xf4, 0x39, 0x0d, 0x8e, 0xa7, 0x07, 0x02, 0x05, 0xdc, 0x9b, 0xc6, 0x03, 0x1a, 0x31, + 0x2a, 0xa9, 0xf0, 0x16, 0x94, 0x8d, 0x78, 0xe4, 0x99, 0x00, 0x09, 0x03, 0x8f, 0x8c, 0xe6, 0x81, + 0x10, 0x01, 0x67, 0xde, 0x62, 0x6f, 0x40, 0x25, 0xd9, 0xf3, 0xc6, 0x94, 0xd1, 0x88, 0x48, 0x3a, + 0x42, 0x61, 0xc4, 0x25, 0x87, 0xef, 0x27, 0xd9, 0x88, 0x84, 0x01, 0xca, 0xb2, 0x91, 0xc9, 0x6e, + 0xef, 0x8e, 0x03, 0x39, 0x89, 0x07, 0x68, 0xc8, 0xe7, 0xde, 0x98, 0x8f, 0xb9, 0xa7, 0x41, 0x83, + 0xf8, 0xb9, 0xb6, 0xb4, 0xa1, 0x9f, 0x12, 0xb2, 0xf6, 0x83, 0x62, 0xe9, 0x58, 0x4e, 0x28, 0x93, + 0xc1, 0x90, 0xc8, 0xa4, 0xfe, 0x66, 0xe9, 0xf6, 0x27, 0x79, 0xf6, 0x9c, 0x0c, 0x27, 0x01, 0xa3, + 0xd1, 0xd2, 0x0b, 0xa7, 0x63, 0xe5, 0x10, 0xde, 0x9c, 0x4a, 0x72, 0x13, 0xca, 0xbb, 0x0d, 0x15, + 0xc5, 0x4c, 0x06, 0x73, 0x7a, 0x0d, 0xf0, 0xf0, 0xdf, 0x00, 0x62, 0x38, 0xa1, 0x73, 0xb2, 0x89, + 0xdb, 0xf9, 0xcd, 0x06, 0xad, 0xc3, 0x54, 0x11, 0x4c, 0x7f, 0x8c, 0xa9, 0x90, 0xd0, 0x07, 0xe5, + 0x38, 0x18, 0x39, 0x56, 0xd7, 0xea, 0xd9, 0xfe, 0xc7, 0x17, 0xab, 0x4e, 0x69, 0xbd, 0xea, 0x94, + 0xfb, 0x27, 0x47, 0x57, 0xab, 0xce, 0x07, 0xb7, 0x15, 0x92, 0xcb, 0x90, 0x0a, 0xd4, 0x3f, 0x39, + 0xc2, 0x0a, 0x0c, 0x9f, 0x81, 0xca, 0x34, 0x60, 0x23, 0xe7, 0x4e, 0xd7, 0xea, 0x35, 0xf6, 0x1f, + 0xa2, 0x7c, 0x02, 0x19, 0x0c, 0x85, 0xd3, 0xb1, 0x72, 0x08, 0xa4, 0x64, 0x40, 0x8b, 0x3d, 0xf4, + 0x45, 0xc4, 0xe3, 0xf0, 0x6b, 0x1a, 0xa9, 0x66, 0xbe, 0x0a, 0xd8, 0xc8, 0xdf, 0x36, 0xc5, 0x2b, + 0xca, 0xc2, 0x9a, 0x11, 0x4e, 0x40, 0x3d, 0xa2, 0x82, 0xc7, 0xd1, 0x90, 0x3a, 0x65, 0xcd, 0xfe, + 0xe8, 0xcd, 0xd9, 0xb1, 0x61, 0xf0, 0x5b, 0xa6, 0x42, 0x3d, 0xf5, 0xe0, 0x8c, 0x1d, 0x7e, 0x0a, + 0x1a, 0x22, 0x1e, 0xa4, 0x01, 0xa7, 0xa2, 0xf5, 0xb8, 0x6f, 0x00, 0x8d, 0x27, 0x79, 0x08, 0x17, + 0xf3, 0x60, 0x00, 0x1a, 0x51, 0xa2, 0xa4, 0xea, 0xda, 0xf9, 0xdf, 0x3b, 0x29, 0xd0, 0x54, 0xa5, + 0x70, 0x4e, 0x87, 0x8b, 0xdc, 0x70, 0x09, 0x9a, 0xc6, 0xcc, 0xba, 0xbc, 0xfb, 0xce, 0x92, 0xdc, + 0x5f, 0xaf, 0x3a, 0x4d, 0xfc, 0x2a, 0x2d, 0xde, 0xac, 0x03, 0xbf, 0x04, 0xd0, 0xb8, 0x0a, 0x42, + 0x38, 0x4d, 0xad, 0x51, 0xdb, 0x68, 0x04, 0xf1, 0xb5, 0x0c, 0x7c, 0x03, 0x0a, 0x76, 0x41, 0x85, + 0x91, 0x39, 0x75, 0xb6, 0x34, 0x3a, 0x1b, 0xfa, 0x63, 0x32, 0xa7, 0x58, 0x47, 0xa0, 0x07, 0x6c, + 0xf5, 0x2b, 0x42, 0x32, 0xa4, 0x4e, 0x55, 0xa7, 0xdd, 0x33, 0x69, 0xf6, 0xe3, 0x34, 0x80, 0xf3, + 0x1c, 0xf8, 0x19, 0xb0, 0x79, 0xa8, 0x5e, 0xf5, 0x80, 0x33, 0xa7, 0xa6, 0x01, 0x6e, 0x0a, 0x38, + 0x4d, 0x03, 0x57, 0x45, 0x03, 0xe7, 0x00, 0xf8, 0x14, 0xd4, 0x63, 0x41, 0xa3, 0x13, 0xf6, 0x9c, + 0x3b, 0x75, 0x2d, 0xe8, 0x87, 0xa8, 0x78, 0x43, 0x5e, 0x59, 0x7b, 0x25, 0x64, 0xdf, 0x64, 0xe7, + 0xef, 0x53, 0xea, 0xc1, 0x19, 0x13, 0xec, 0x83, 0x2a, 0x1f, 0xfc, 0x40, 0x87, 0xd2, 0xb1, 0x35, + 0xe7, 0xee, 0xad, 0x43, 0x32, 0x5b, 0x8b, 0x30, 0x39, 0x3f, 0xfe, 0x49, 0x52, 0xa6, 0xe6, 0xe3, + 0xdf, 0x35, 0xd4, 0xd5, 0x53, 0x4d, 0x82, 0x0d, 0x19, 0xfc, 0x1e, 0xd8, 0x7c, 0x36, 0x4a, 0x9c, + 0x0e, 0x78, 0x1b, 0xe6, 0x4c, 0xca, 0xd3, 0x94, 0x07, 0xe7, 0x94, 0x70, 0x07, 0x54, 0x47, 0xd1, + 0x12, 0xc7, 0xcc, 0x69, 0x74, 0xad, 0x5e, 0xdd, 0x07, 0xaa, 0x87, 0x23, 0xed, 0xc1, 0x26, 0x02, + 0x9f, 0x81, 0x1a, 0x0f, 0x95, 
0x18, 0xc2, 0xd9, 0x7e, 0x9b, 0x0e, 0x9a, 0xa6, 0x83, 0xda, 0x69, + 0xc2, 0x82, 0x53, 0xba, 0x9d, 0xdf, 0x2b, 0xe0, 0x5e, 0xe1, 0x42, 0x89, 0x90, 0x33, 0x41, 0xff, + 0x93, 0x13, 0xf5, 0x11, 0xa8, 0x91, 0xd9, 0x8c, 0x9f, 0xd3, 0xe4, 0x4a, 0xd5, 0xf3, 0x26, 0x0e, + 0x13, 0x37, 0x4e, 0xe3, 0xf0, 0x0c, 0x54, 0x85, 0x24, 0x32, 0x16, 0xe6, 0xe2, 0x3c, 0x78, 0xbd, + 0xf5, 0x7a, 0xa2, 0x31, 0x89, 0x60, 0x98, 0x8a, 0x78, 0x26, 0xb1, 0xe1, 0x81, 0x1d, 0xb0, 0x15, + 0x12, 0x39, 0x9c, 0xe8, 0xab, 0xb2, 0xed, 0xdb, 0xeb, 0x55, 0x67, 0xeb, 0x4c, 0x39, 0x70, 0xe2, + 0x87, 0x07, 0xc0, 0xd6, 0x0f, 0x4f, 0x97, 0x61, 0xba, 0x18, 0x6d, 0x35, 0xa2, 0xb3, 0xd4, 0x79, + 0x55, 0x34, 0x70, 0x9e, 0x0c, 0x7f, 0xb1, 0x40, 0x8b, 0xc4, 0xa3, 0x40, 0x1e, 0x32, 0xc6, 0x25, + 0x49, 0xa6, 0x52, 0xed, 0x96, 0x7b, 0x8d, 0xfd, 0x63, 0xf4, 0x4f, 0x5f, 0x42, 0x74, 0x4d, 0x67, + 0x74, 0xb8, 0xc1, 0x73, 0xcc, 0x64, 0xb4, 0xf4, 0x1d, 0x23, 0x54, 0x6b, 0x33, 0x8c, 0xaf, 0x15, + 0x86, 0x3d, 0x50, 0x3f, 0x27, 0x11, 0x0b, 0xd8, 0x58, 0x38, 0xb5, 0x6e, 0x59, 0xed, 0xb7, 0x5a, + 0x8f, 0x6f, 0x8c, 0x0f, 0x67, 0xd1, 0xf6, 0xe7, 0xe0, 0xbd, 0x1b, 0xcb, 0xc1, 0x16, 0x28, 0x4f, + 0xe9, 0x32, 0x19, 0x36, 0x56, 0x8f, 0xf0, 0xff, 0x60, 0x6b, 0x41, 0x66, 0x31, 0xd5, 0x83, 0xb3, + 0x71, 0x62, 0x3c, 0xba, 0x73, 0x60, 0xed, 0xfc, 0x61, 0x81, 0x66, 0xe1, 0x6f, 0x2c, 0x02, 0x7a, + 0x0e, 0xfb, 0xa0, 0x66, 0x8e, 0x8e, 0xe6, 0x68, 0xec, 0xa3, 0xd7, 0x96, 0x41, 0xa3, 0xfc, 0x86, + 0x7a, 0x29, 0xd2, 0x8b, 0x98, 0x72, 0xc1, 0x6f, 0xf5, 0x87, 0x48, 0xeb, 0x64, 0x3e, 0x73, 0xde, + 0x1b, 0xca, 0x9b, 0x48, 0x91, 0x5a, 0x38, 0xa3, 0xf3, 0x77, 0x2f, 0x2e, 0xdd, 0xd2, 0x8b, 0x4b, + 0xb7, 0xf4, 0xf2, 0xd2, 0x2d, 0xfd, 0xbc, 0x76, 0xad, 0x8b, 0xb5, 0x6b, 0xbd, 0x58, 0xbb, 0xd6, + 0xcb, 0xb5, 0x6b, 0xfd, 0xb9, 0x76, 0xad, 0x5f, 0xff, 0x72, 0x4b, 0xdf, 0xd5, 0x0c, 0xf1, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x23, 0xa1, 0xd9, 0x27, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := 
m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + 
if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { 
+ l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv 
:= reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto new file mode 100644 index 000000000..41a264312 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.admission.v1beta1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
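For illustration only (not part of the vendored files): the requestKind/requestResource divergence documented above only happens when a webhook registers with `matchPolicy: Equivalent` and the apiserver converts the request. A possible registration using the admissionregistration/v1 types also touched by this bump; the webhook name is hypothetical and required fields such as clientConfig are omitted.

package example // illustrative sketch, not part of this patch

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

// exampleWebhook registers for apps/v1 deployments with matchPolicy Equivalent,
// so an apps/v1beta1 request would be converted and delivered with
// kind apps/v1 Deployment and requestKind apps/v1beta1 Deployment.
func exampleWebhook() admissionregistrationv1.ValidatingWebhook {
	equivalent := admissionregistrationv1.Equivalent
	return admissionregistrationv1.ValidatingWebhook{
		Name:        "deployments.example.com", // hypothetical webhook name
		MatchPolicy: &equivalent,
		Rules: []admissionregistrationv1.RuleWithOperations{{
			Operations: []admissionregistrationv1.OperationType{
				admissionregistrationv1.Create,
				admissionregistrationv1.Update,
			},
			Rule: admissionregistrationv1.Rule{
				APIGroups:   []string{"apps"},
				APIVersions: []string{"v1"},
				Resources:   []string{"deployments"},
			},
		}},
		// ClientConfig, SideEffects and AdmissionReviewVersions are required by the
		// apiserver but omitted here for brevity.
	}
}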
+ // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + map auditAnnotations = 6; + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + repeated string warnings = 7; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response. 
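For illustration only (not part of the vendored files): the request/response pairing documented here hinges on the UID, which the webhook must copy from request to response. A minimal validating-handler sketch against the v1beta1 Go types added by this patch; the handler name and the "deny DELETE" policy are hypothetical.

package example // illustrative sketch, not part of this patch

import (
	"encoding/json"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// serveReview decodes an AdmissionReview, evaluates the request, and writes an
// AdmissionReview back whose response UID matches the request UID.
func serveReview(w http.ResponseWriter, r *http.Request) {
	var review admissionv1beta1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
		http.Error(w, "malformed AdmissionReview", http.StatusBadRequest)
		return
	}

	resp := &admissionv1beta1.AdmissionResponse{
		UID:     review.Request.UID, // must be copied from the request
		Allowed: review.Request.Operation != admissionv1beta1.Delete, // hypothetical policy
	}
	if !resp.Allowed {
		resp.Result = &metav1.Status{Message: "DELETE is not permitted by this example webhook"}
	}

	review.Response = resp
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(review)
}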
+ // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go similarity index 90% rename from vendor/k8s.io/api/settings/v1alpha1/register.go rename to vendor/k8s.io/api/admission/v1beta1/register.go index eee278d95..78d21a0c8 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/admission/v1beta1/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,11 +22,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "settings.k8s.io" +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -44,8 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, + &AdmissionReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go new file mode 100644 index 000000000..00c619d99 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types.go @@ -0,0 +1,174 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 +// +k8s:prerelease-lifecycle-gen:deprecated=1.19 +// This API is never server served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally +// and having the generator against this group will protect future APIs which may be served. +// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. 
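For illustration only (not part of the vendored files): the register.go change above registers AdmissionReview under the admission.k8s.io/v1beta1 group version, so a caller that prefers versioned decoding over plain json.Unmarshal can go through a scheme and codec factory. A sketch under that assumption; the function name is made up.

package example // illustrative sketch, not part of this patch

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// decodeReview deserializes an AdmissionReview using the scheme populated by
// the package's AddToScheme (backed by the addKnownTypes shown above).
func decodeReview(body []byte) (*admissionv1beta1.AdmissionReview, error) {
	scheme := runtime.NewScheme()
	if err := admissionv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}

	review := &admissionv1beta1.AdmissionReview{}
	deserializer := serializer.NewCodecFactory(scheme).UniversalDeserializer()
	if _, _, err := deserializer.Decode(body, nil, review); err != nil {
		return nil, fmt.Errorf("decoding AdmissionReview: %w", err)
	}
	return review, nil
}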
+ // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. 
+ // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". 
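For illustration only (not part of the vendored files): a mutating webhook returns its changes as an RFC 6902 JSON Patch in Patch, with PatchType set to the single supported value. A sketch for a hypothetical mutation that adds a label; the audit annotation key is likewise made up.

package example // illustrative sketch, not part of this patch

import (
	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

// mutateResponse allows the request and attaches an RFC 6902 patch that adds a
// label (assumes metadata.labels already exists on the object), plus an audit
// annotation describing the change.
func mutateResponse(uid types.UID) *admissionv1beta1.AdmissionResponse {
	patchType := admissionv1beta1.PatchTypeJSONPatch
	patch := []byte(`[{"op":"add","path":"/metadata/labels/mutated","value":"true"}]`)

	return &admissionv1beta1.AdmissionResponse{
		UID:       uid,
		Allowed:   true,
		Patch:     patch,
		PatchType: &patchType,
		AuditAnnotations: map[string]string{
			// the webhook-name prefix (e.g. "mutator.example.com/") is added by the
			// apiserver-side admission controller, not by the webhook itself
			"reason": "added-default-label",
		},
	}
}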
+ // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..13067ad80 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,78 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. 
It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. 
`meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", + "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4f3dd45be --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
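For illustration only (not part of the vendored files): the generated deepcopy helpers that follow are what let a caller mutate a working copy of an AdmissionRequest without touching the original. A small sketch; the defaulting logic is hypothetical.

package example // illustrative sketch, not part of this patch

import (
	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// requestWithDefaults returns a copy of req with a hypothetical default
// namespace applied, leaving the caller's AdmissionRequest untouched.
func requestWithDefaults(req *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionRequest {
	out := req.DeepCopy() // generated deepcopy; safe to mutate independently
	if out.Namespace == "" {
		out.Namespace = "default"
	}
	return out
}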
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(v1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(v1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. +func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..8fc1cde0a --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,49 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index 7f9772e7d..16ab9d5d6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 70ffa9219..bdae74037 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1beta1; diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go new file mode 100644 index 000000000..a4da95d44 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=internal.apiserver.k8s.io + +// Package v1alpha1 contains the v1alpha1 version of the API used by the +// apiservers themselves. +package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1" diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go new file mode 100644 index 000000000..0af1c09d1 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -0,0 +1,1718 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
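For illustration only (not part of the vendored files): every `shift += 7` loop in these generated files, and the encodeVarintGenerated/sovGenerated helpers further down, implement protobuf base-128 varints: seven payload bits per byte, high bit set on every byte except the last. A standalone forward-appending sketch of the same bit layout (the generated code instead writes backwards into a pre-sized buffer).

package main // illustrative sketch, not part of this patch

import (
	"errors"
	"fmt"
)

// putUvarint appends v to buf using base-128 varint encoding:
// low 7 bits per byte, MSB set while more bytes follow.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint mirrors the generated Unmarshal loops: accumulate 7 bits per byte
// until a byte with the MSB clear terminates the value.
func uvarint(buf []byte) (uint64, int, error) {
	var x uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64") // ErrIntOverflowGenerated analogue
		}
		i := int(shift / 7)
		if i >= len(buf) {
			return 0, 0, errors.New("unexpected end of input") // io.ErrUnexpectedEOF analogue
		}
		b := buf[i]
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, i + 1, nil
		}
	}
}

func main() {
	enc := putUvarint(nil, 300) // 300 encodes as 0xAC 0x02 on the wire
	v, n, _ := uvarint(enc)
	fmt.Printf("% x -> %d (%d bytes)\n", enc, v, n)
}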
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} } +func (*ServerStorageVersion) ProtoMessage() {} +func (*ServerStorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{0} +} +func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerStorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerStorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerStorageVersion.Merge(m, src) +} +func (m *ServerStorageVersion) XXX_Size() int { + return m.Size() +} +func (m *ServerStorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ServerStorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo + +func (m *StorageVersion) Reset() { *m = StorageVersion{} } +func (*StorageVersion) ProtoMessage() {} +func (*StorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{1} +} +func (m *StorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersion.Merge(m, src) +} +func (m *StorageVersion) XXX_Size() int { + return m.Size() +} +func (m *StorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersion proto.InternalMessageInfo + +func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} } +func (*StorageVersionCondition) ProtoMessage() {} +func (*StorageVersionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{2} +} +func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionCondition.Merge(m, src) +} +func (m *StorageVersionCondition) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo + +func (m *StorageVersionList) Reset() { *m = StorageVersionList{} } +func (*StorageVersionList) ProtoMessage() {} +func (*StorageVersionList) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{3} +} +func (m *StorageVersionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionList.Merge(m, src) +} +func (m *StorageVersionList) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionList) XXX_DiscardUnknown() { + 
xxx_messageInfo_StorageVersionList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo + +func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} } +func (*StorageVersionSpec) ProtoMessage() {} +func (*StorageVersionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{4} +} +func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionSpec.Merge(m, src) +} +func (m *StorageVersionSpec) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo + +func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} } +func (*StorageVersionStatus) ProtoMessage() {} +func (*StorageVersionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{5} +} +func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionStatus.Merge(m, src) +} +func (m *StorageVersionStatus) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ServerStorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.ServerStorageVersion") + proto.RegisterType((*StorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersion") + proto.RegisterType((*StorageVersionCondition)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionCondition") + proto.RegisterType((*StorageVersionList)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionList") + proto.RegisterType((*StorageVersionSpec)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionSpec") + proto.RegisterType((*StorageVersionStatus)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03) +} + +var fileDescriptor_a3903ff5e3cc7a03 = []byte{ + // 763 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 0xd2, 0x52, 0x60, 0xaa, 0x54, 0x46, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0xa2, 0x41, 0x8d, + 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x03, 0x62, 0x06, 0xe2, 0x01, 0x3d, 0x38, + 0xdd, 0x1d, 0xb7, 0x6b, 0xbb, 0x3b, 0x9b, 0x9d, 0x69, 0x13, 0x2e, 0xc6, 0x8f, 0xe0, 0x07, 0xf1, + 0xe8, 0x87, 0xe0, 0x64, 0xb8, 0x98, 0x90, 0x98, 0x34, 0xb2, 0x7e, 0x0b, 0x4e, 0x66, 0x66, 0x77, + 0x5b, 0xb6, 0x2d, 0xb1, 0xe1, 0xb0, 0xc9, 0xce, 0x7b, 0xef, 0xf7, 0x7b, 0x7f, 0xe6, 0x37, 0x0f, + 0xbc, 0x69, 0x3e, 0x63, 0x9a, 0x43, 0xf5, 0x66, 0xbb, 0x4e, 0x02, 0x8f, 0x70, 0xc2, 0xf4, 0x0e, + 0xf1, 
0x2c, 0x1a, 0xe8, 0xb1, 0x03, 0xfb, 0x8e, 0xf8, 0x18, 0x09, 0x3a, 0x24, 0x70, 0x3c, 0x4e, + 0x02, 0x0f, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0xf9, 0x0d, 0xbc, 0xa2, 0xdb, 0xc4, 0x23, 0x01, 0xe6, + 0xc4, 0xd2, 0xfc, 0x80, 0x72, 0x0a, 0xef, 0x46, 0x30, 0x0d, 0xfb, 0x8e, 0x36, 0x04, 0xd3, 0x12, + 0x58, 0xf9, 0x91, 0xed, 0xf0, 0x46, 0xbb, 0xae, 0x99, 0xd4, 0xd5, 0x6d, 0x6a, 0x53, 0x5d, 0xa2, + 0xeb, 0xed, 0x4f, 0xf2, 0x24, 0x0f, 0xf2, 0x2f, 0x62, 0x2d, 0x3f, 0xe9, 0x17, 0xe3, 0x62, 0xb3, + 0xe1, 0x78, 0x24, 0x38, 0xd2, 0xfd, 0xa6, 0x2d, 0x2b, 0xd3, 0x5d, 0xc2, 0xb1, 0xde, 0x19, 0xaa, + 0xa5, 0xac, 0x5f, 0x86, 0x0a, 0xda, 0x1e, 0x77, 0x5c, 0x32, 0x04, 0x78, 0xfa, 0x3f, 0x00, 0x33, + 0x1b, 0xc4, 0xc5, 0x83, 0xb8, 0xea, 0x2f, 0x05, 0xcc, 0xef, 0xcb, 0x4e, 0xf7, 0x39, 0x0d, 0xb0, + 0x4d, 0xde, 0x91, 0x80, 0x39, 0xd4, 0x83, 0xab, 0xa0, 0x80, 0x7d, 0x27, 0x72, 0x6d, 0x6f, 0x96, + 0x94, 0x25, 0x65, 0x79, 0xc6, 0xb8, 0x79, 0xdc, 0xad, 0x64, 0xc2, 0x6e, 0xa5, 0xb0, 0xfe, 0x76, + 0x3b, 0x71, 0xa1, 0x8b, 0x71, 0x70, 0x1d, 0x14, 0x89, 0x67, 0x52, 0xcb, 0xf1, 0xec, 0x98, 0xa9, + 0x34, 0x21, 0xa1, 0x8b, 0x31, 0xb4, 0xb8, 0x95, 0x76, 0xa3, 0xc1, 0x78, 0xb8, 0x01, 0xe6, 0x2c, + 0x62, 0x52, 0x0b, 0xd7, 0x5b, 0x49, 0x35, 0xac, 0x94, 0x5d, 0xca, 0x2e, 0xcf, 0x18, 0x0b, 0x61, + 0xb7, 0x32, 0xb7, 0x39, 0xe8, 0x44, 0xc3, 0xf1, 0xd5, 0x1f, 0x13, 0x60, 0x76, 0xa0, 0xa3, 0x8f, + 0x60, 0x5a, 0x8c, 0xdb, 0xc2, 0x1c, 0xcb, 0x76, 0x0a, 0xb5, 0xc7, 0x5a, 0xff, 0xca, 0x7b, 0x53, + 0xd3, 0xfc, 0xa6, 0x2d, 0xef, 0x5f, 0x13, 0xd1, 0x5a, 0x67, 0x45, 0xdb, 0xab, 0x7f, 0x26, 0x26, + 0xdf, 0x25, 0x1c, 0x1b, 0x30, 0xee, 0x02, 0xf4, 0x6d, 0xa8, 0xc7, 0x0a, 0xdf, 0x83, 0x1c, 0xf3, + 0x89, 0x29, 0x3b, 0x2e, 0xd4, 0xd6, 0xb4, 0xb1, 0x04, 0xa5, 0xa5, 0xcb, 0xdc, 0xf7, 0x89, 0x69, + 0x5c, 0x8b, 0xd3, 0xe4, 0xc4, 0x09, 0x49, 0x52, 0x68, 0x82, 0x3c, 0xe3, 0x98, 0xb7, 0xc5, 0x2c, + 0x04, 0xfd, 0xf3, 0xab, 0xd1, 0x4b, 0x0a, 0x63, 0x36, 0x4e, 0x90, 0x8f, 0xce, 0x28, 0xa6, 0xae, + 0x7e, 0xcf, 0x82, 0xc5, 0x34, 0x60, 0x83, 0x7a, 0x96, 0xc3, 0xc5, 0xfc, 0x5e, 0x82, 0x1c, 0x3f, + 0xf2, 0x49, 0x2c, 0x85, 0x87, 0x49, 0x89, 0x07, 0x47, 0x3e, 0x39, 0xef, 0x56, 0x6e, 0x5f, 0x02, + 0x13, 0x6e, 0x24, 0x81, 0x70, 0xad, 0xd7, 0x41, 0x24, 0x89, 0x3b, 0xe9, 0x22, 0xce, 0xbb, 0x95, + 0x62, 0x0f, 0x96, 0xae, 0x0b, 0xbe, 0x06, 0x90, 0xd6, 0x65, 0x87, 0xd6, 0xab, 0x48, 0xc1, 0x42, + 0x59, 0x62, 0x10, 0x59, 0xa3, 0x1c, 0xd3, 0xc0, 0xbd, 0xa1, 0x08, 0x34, 0x02, 0x05, 0x3b, 0x00, + 0xb6, 0x30, 0xe3, 0x07, 0x01, 0xf6, 0x58, 0x54, 0xa2, 0xe3, 0x92, 0x52, 0x4e, 0x0e, 0xf5, 0xc1, + 0x78, 0x8a, 0x10, 0x88, 0x7e, 0xde, 0x9d, 0x21, 0x36, 0x34, 0x22, 0x03, 0xbc, 0x07, 0xf2, 0x01, + 0xc1, 0x8c, 0x7a, 0xa5, 0x49, 0xd9, 0x7e, 0xef, 0x0e, 0x90, 0xb4, 0xa2, 0xd8, 0x0b, 0xef, 0x83, + 0x29, 0x97, 0x30, 0x86, 0x6d, 0x52, 0xca, 0xcb, 0xc0, 0x62, 0x1c, 0x38, 0xb5, 0x1b, 0x99, 0x51, + 0xe2, 0xaf, 0xfe, 0x54, 0x00, 0x4c, 0xcf, 0x7d, 0xc7, 0x61, 0x1c, 0x7e, 0x18, 0x52, 0xba, 0x36, + 0x5e, 0x5f, 0x02, 0x2d, 0x75, 0x7e, 0x23, 0x4e, 0x39, 0x9d, 0x58, 0x2e, 0xa8, 0xfc, 0x10, 0x4c, + 0x3a, 0x9c, 0xb8, 0xe2, 0x16, 0xb3, 0xcb, 0x85, 0xda, 0xea, 0x95, 0x74, 0x68, 0x5c, 0x8f, 0x33, + 0x4c, 0x6e, 0x0b, 0x2e, 0x14, 0x51, 0x56, 0xe7, 0x07, 0xfb, 0x11, 0x0f, 0xa0, 0xfa, 0x7b, 0x02, + 0xcc, 0x8f, 0x92, 0x31, 0xfc, 0x02, 0x8a, 0x2c, 0x65, 0x67, 0x25, 0x45, 0x16, 0x35, 0xf6, 0xe3, + 0x18, 0xb1, 0xfa, 0xfa, 0xab, 0x2a, 0x6d, 0x67, 0x68, 0x30, 0x19, 0xdc, 0x03, 0x0b, 0x26, 0x75, + 0x5d, 0xea, 0x6d, 0x8d, 0xdc, 0x79, 0xb7, 0xc2, 0x6e, 0x65, 0x61, 0x63, 0x54, 0x00, 0x1a, 0x8d, + 0x83, 0x01, 0x00, 0x66, 0xf2, 
0x04, 0xa2, 0xa5, 0x57, 0xa8, 0xbd, 0xb8, 0xd2, 0x80, 0x7b, 0x2f, + 0xa9, 0xbf, 0xb3, 0x7a, 0x26, 0x86, 0x2e, 0x64, 0x31, 0xb4, 0xe3, 0x33, 0x35, 0x73, 0x72, 0xa6, + 0x66, 0x4e, 0xcf, 0xd4, 0xcc, 0xd7, 0x50, 0x55, 0x8e, 0x43, 0x55, 0x39, 0x09, 0x55, 0xe5, 0x34, + 0x54, 0x95, 0x3f, 0xa1, 0xaa, 0x7c, 0xfb, 0xab, 0x66, 0x0e, 0xa7, 0x93, 0x3c, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xa1, 0x5f, 0xcf, 0x37, 0x78, 0x07, 0x00, 0x00, +} + +func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerStorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DecodableVersions) > 0 { + for iNdEx := len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DecodableVersions[iNdEx]) + copy(dAtA[i:], m.DecodableVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DecodableVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.EncodingVersion) + copy(dAtA[i:], m.EncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EncodingVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIServerID) + copy(dAtA[i:], m.APIServerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIServerID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StorageVersionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.CommonEncodingVersion != nil { + i -= len(*m.CommonEncodingVersion) + copy(dAtA[i:], *m.CommonEncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CommonEncodingVersion))) + i-- + dAtA[i] = 0x12 + } + if len(m.StorageVersions) > 0 { + for iNdEx := len(m.StorageVersions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StorageVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return 
base +} +func (m *ServerStorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIServerID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DecodableVersions) > 0 { + for _, s := range m.DecodableVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StorageVersionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StorageVersions) > 0 { + for _, e := range m.StorageVersions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.CommonEncodingVersion != nil { + l = len(*m.CommonEncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServerStorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerStorageVersion{`, + `APIServerID:` + fmt.Sprintf("%v", this.APIServerID) + `,`, + `EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`, + `DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersion{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionSpec", "StorageVersionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionStatus", "StorageVersionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + 
`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageVersion{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersion", "StorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageVersionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionSpec{`, + `}`, + }, "") + return s +} +func (this *StorageVersionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStorageVersions := "[]ServerStorageVersion{" + for _, f := range this.StorageVersions { + repeatedStringForStorageVersions += strings.Replace(strings.Replace(f.String(), "ServerStorageVersion", "ServerStorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForStorageVersions += "}" + repeatedStringForConditions := "[]StorageVersionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StorageVersionCondition", "StorageVersionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StorageVersionStatus{`, + `StorageVersions:` + repeatedStringForStorageVersions + `,`, + `CommonEncodingVersion:` + valueToStringGenerated(this.CommonEncodingVersion) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerStorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerStorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIServerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.APIServerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EncodingVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DecodableVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StorageVersionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) 
+ iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageVersion{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
StorageVersionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersions = append(m.StorageVersions, ServerStorageVersion{}) + if err := m.StorageVersions[len(m.StorageVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonEncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CommonEncodingVersion = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StorageVersionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto new file mode 100644 index 000000000..539c8c9e2 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -0,0 +1,121 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apiserverinternal.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +message ServerStorageVersion { + // The ID of the reporting API server. + optional string apiServerID = 1; + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). + optional string encodingVersion = 2; + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + repeated string decodableVersions = 3; +} + +// Storage version of a specific resource. +message StorageVersion { + // The name is .. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + optional StorageVersionSpec spec = 2; + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + optional StorageVersionStatus status = 3; +} + +// Describes the state of the storageVersion at a certain point. +message StorageVersionCondition { + // Type of the condition. + // +required + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. 
+ // +required + optional string status = 2; + + // If set, this represents the .metadata.generation that the condition was set based upon. + // +optional + optional int64 observedGeneration = 3; + + // Last time the condition transitioned from one status to another. + // +required + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // The reason for the condition's last transition. + // +required + optional string reason = 5; + + // A human readable message indicating details about the transition. + // +required + optional string message = 6; +} + +// A list of StorageVersions. +message StorageVersionList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StorageVersion items = 2; +} + +// StorageVersionSpec is an empty spec. +message StorageVersionSpec { +} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +message StorageVersionStatus { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + repeated ServerStorageVersion storageVersions = 1; + + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. + // +optional + optional string commonEncodingVersion = 2; + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + repeated StorageVersionCondition conditions = 3; +} + diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go new file mode 100644 index 000000000..5e46e8d9a --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "internal.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageVersion{}, + &StorageVersionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go new file mode 100644 index 000000000..880091b6f --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Storage version of a specific resource. +type StorageVersion struct { + metav1.TypeMeta `json:",inline"` + // The name is .. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + Spec StorageVersionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + Status StorageVersionStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// StorageVersionSpec is an empty spec. +type StorageVersionSpec struct{} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +type StorageVersionStatus struct { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + StorageVersions []ServerStorageVersion `json:"storageVersions,omitempty" protobuf:"bytes,1,opt,name=storageVersions"` + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. 
+ // +optional + CommonEncodingVersion *string `json:"commonEncodingVersion,omitempty" protobuf:"bytes,2,opt,name=commonEncodingVersion"` + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []StorageVersionCondition `json:"conditions,omitempty" protobuf:"bytes,3,opt,name=conditions"` +} + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +type ServerStorageVersion struct { + // The ID of the reporting API server. + APIServerID string `json:"apiServerID,omitempty" protobuf:"bytes,1,opt,name=apiServerID"` + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). + EncodingVersion string `json:"encodingVersion,omitempty" protobuf:"bytes,2,opt,name=encodingVersion"` + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"` +} + +type StorageVersionConditionType string + +const ( + // Indicates that encoding storage versions reported by all servers are equal. + AllEncodingVersionsEqual StorageVersionConditionType = "AllEncodingVersionsEqual" +) + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// Describes the state of the storageVersion at a certain point. +type StorageVersionCondition struct { + // Type of the condition. + // +required + Type StorageVersionConditionType `json:"type" protobuf:"bytes,1,opt,name=type"` + // Status of the condition, one of True, False, Unknown. + // +required + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // If set, this represents the .metadata.generation that the condition was set based upon. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + // Last time the condition transitioned from one status to another. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +required + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // A human readable message indicating details about the transition. + // +required + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of StorageVersions. +type StorageVersionList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..b05c28595 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ServerStorageVersion = map[string]string{ + "": "An API server instance reports the version it can decode and the version it encodes objects to when persisting objects in the backend.", + "apiServerID": "The ID of the reporting API server.", + "encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", + "decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", +} + +func (ServerStorageVersion) SwaggerDoc() map[string]string { + return map_ServerStorageVersion +} + +var map_StorageVersion = map[string]string{ + "": "\n Storage version of a specific resource.", + "metadata": "The name is ..", + "spec": "Spec is an empty spec. 
It is here to comply with Kubernetes API style.", + "status": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", +} + +func (StorageVersion) SwaggerDoc() map[string]string { + return map_StorageVersion +} + +var map_StorageVersionCondition = map[string]string{ + "": "Describes the state of the storageVersion at a certain point.", + "type": "Type of the condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "observedGeneration": "If set, this represents the .metadata.generation that the condition was set based upon.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StorageVersionCondition) SwaggerDoc() map[string]string { + return map_StorageVersionCondition +} + +var map_StorageVersionList = map[string]string{ + "": "A list of StorageVersions.", +} + +func (StorageVersionList) SwaggerDoc() map[string]string { + return map_StorageVersionList +} + +var map_StorageVersionSpec = map[string]string{ + "": "StorageVersionSpec is an empty spec.", +} + +func (StorageVersionSpec) SwaggerDoc() map[string]string { + return map_StorageVersionSpec +} + +var map_StorageVersionStatus = map[string]string{ + "": "API server instances report the versions they can decode and the version they encode objects to when persisting objects in the backend.", + "storageVersions": "The reported versions per API server instance.", + "commonEncodingVersion": "If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality.", + "conditions": "The latest available observations of the storageVersion's state.", +} + +func (StorageVersionStatus) SwaggerDoc() map[string]string { + return map_StorageVersionStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..7e82a9070 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,175 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) { + *out = *in + if in.DecodableVersions != nil { + in, out := &in.DecodableVersions, &out.DecodableVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStorageVersion. +func (in *ServerStorageVersion) DeepCopy() *ServerStorageVersion { + if in == nil { + return nil + } + out := new(ServerStorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersion) DeepCopyInto(out *StorageVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersion. +func (in *StorageVersion) DeepCopy() *StorageVersion { + if in == nil { + return nil + } + out := new(StorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionCondition) DeepCopyInto(out *StorageVersionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionCondition. +func (in *StorageVersionCondition) DeepCopy() *StorageVersionCondition { + if in == nil { + return nil + } + out := new(StorageVersionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionList) DeepCopyInto(out *StorageVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionList. +func (in *StorageVersionList) DeepCopy() *StorageVersionList { + if in == nil { + return nil + } + out := new(StorageVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionSpec) DeepCopyInto(out *StorageVersionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionSpec. +func (in *StorageVersionSpec) DeepCopy() *StorageVersionSpec { + if in == nil { + return nil + } + out := new(StorageVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StorageVersionStatus) DeepCopyInto(out *StorageVersionStatus) { + *out = *in + if in.StorageVersions != nil { + in, out := &in.StorageVersions, &out.StorageVersions + *out = make([]ServerStorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CommonEncodingVersion != nil { + in, out := &in.CommonEncodingVersion, &out.CommonEncodingVersion + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StorageVersionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionStatus. +func (in *StorageVersionStatus) DeepCopy() *StorageVersionStatus { + if in == nil { + return nil + } + out := new(StorageVersionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6c5527974..3ee640462 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1; diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 694f6570d..888f3e79e 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta1; diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 1f4a292f5..9f822faee 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -58,7 +58,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale // Scale represents a scaling request for a resource. @@ -81,7 +81,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.5 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet // DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for @@ -284,7 +284,7 @@ type StatefulSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.5 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList // StatefulSetList is a collection of StatefulSets. 
@@ -299,7 +299,7 @@ type StatefulSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for @@ -373,7 +373,7 @@ type DeploymentSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentRollback // DEPRECATED. @@ -534,7 +534,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -552,7 +552,7 @@ type DeploymentList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.7 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the @@ -583,7 +583,7 @@ type ControllerRevision struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.7 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList // ControllerRevisionList is a resource containing a list of ControllerRevision objects. diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go index 80a5e2f2b..f3850fc90 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
@@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -141,7 +141,7 @@ func (in *DeploymentRollback) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -165,7 +165,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
@@ -189,7 +189,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -213,5 +213,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 17e43970f..1ea7e23a8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta2; diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 3d294697c..fc542ac1c 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -60,7 +60,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale // Scale represents a scaling request for a resource. @@ -85,7 +85,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet // DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for @@ -292,7 +292,7 @@ type StatefulSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList // StatefulSetList is a collection of StatefulSets. 
@@ -307,7 +307,7 @@ type StatefulSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for @@ -510,7 +510,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -681,7 +681,7 @@ type DaemonSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for @@ -718,7 +718,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList // DaemonSetList is a collection of daemon sets. @@ -737,7 +737,7 @@ type DaemonSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for @@ -769,7 +769,7 @@ type ReplicaSet struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList // ReplicaSetList is a collection of ReplicaSets. @@ -873,7 +873,7 @@ type ReplicaSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. 
See the @@ -904,7 +904,7 @@ type ControllerRevision struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList // ControllerRevisionList is a resource containing a list of ControllerRevision objects. diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go index 3a63b8f12..3368a1896 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
@@ -141,7 +141,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -165,7 +165,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -189,7 +189,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -213,7 +213,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -237,7 +237,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
@@ -261,7 +261,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -285,5 +285,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index db7be173d..2fb124364 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1; diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index caf2a6a53..d3bff49eb 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1beta1; diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index f68a04e49..931b0f499 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1; diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 3876a3eeb..a9e221608 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1beta1; diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 1e3d89076..1de893f7e 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} + return fileDescriptor_2bb1f2101a7f10e2, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m 
*HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} + return fileDescriptor_2bb1f2101a7f10e2, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} + return fileDescriptor_2bb1f2101a7f10e2, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} + return 
fileDescriptor_2bb1f2101a7f10e2, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} + return fileDescriptor_2bb1f2101a7f10e2, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} + return fileDescriptor_2bb1f2101a7f10e2, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{13} + return fileDescriptor_2bb1f2101a7f10e2, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} + return fileDescriptor_2bb1f2101a7f10e2, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} + return fileDescriptor_2bb1f2101a7f10e2, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} + return fileDescriptor_2bb1f2101a7f10e2, []int{18} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} + return fileDescriptor_2bb1f2101a7f10e2, []int{19} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} + return fileDescriptor_2bb1f2101a7f10e2, []int{20} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -580,6 +636,8 @@ func (m *ScaleStatus) 
XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") @@ -606,102 +664,206 @@ func init() { } var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 
0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 
0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, + // 1605 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x70, 0xd3, 0xd6, + 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x4c, 0x78, 0x58, 0x19, 0x3d, 0x86, + 0xc9, 0x7b, 0xaf, 0x48, 0x8d, 0x4b, 0x19, 0xba, 0x8c, 0xdc, 0x52, 0x98, 0xc6, 0x10, 0x6e, 0x02, + 0xa5, 0xbf, 0xc3, 0x8d, 0x7c, 0x71, 0x44, 0x2c, 0xc9, 0x23, 0xc9, 0x1e, 0xc2, 0x0c, 0x33, 0xed, + 0xa2, 0xfb, 0x6e, 0x68, 0xb7, 0xed, 0x4c, 0xb7, 0x5d, 0xb3, 0xee, 0x8e, 0x25, 0x0b, 0x66, 0xca, + 0xca, 0x53, 0xd4, 0x45, 0x17, 0x5d, 0x75, 0xcb, 0xaa, 
0xa3, 0xab, 0x2b, 0x59, 0xb2, 0x2d, 0xc5, + 0x71, 0x42, 0xa6, 0xed, 0xb0, 0xb3, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x1c, 0x50, 0xb6, + 0x2f, 0xd9, 0x92, 0x66, 0xca, 0xdb, 0xad, 0x4d, 0x6a, 0x19, 0xd4, 0xa1, 0xb6, 0xdc, 0xa6, 0x46, + 0xcd, 0xb4, 0x64, 0x7e, 0x40, 0x9a, 0x9a, 0x4c, 0x5a, 0x8e, 0x69, 0xab, 0xa4, 0xa1, 0x19, 0x75, + 0xb9, 0xbd, 0x2c, 0xd7, 0xa9, 0x41, 0x2d, 0xe2, 0xd0, 0x9a, 0xd4, 0xb4, 0x4c, 0xc7, 0x44, 0xa7, + 0x7c, 0x52, 0x89, 0x34, 0x35, 0x29, 0x42, 0x2a, 0xb5, 0x97, 0x17, 0xce, 0xd7, 0x35, 0x67, 0xab, + 0xb5, 0x29, 0xa9, 0xa6, 0x2e, 0xd7, 0xcd, 0xba, 0x29, 0x33, 0x8e, 0xcd, 0xd6, 0x5d, 0xf6, 0xc5, + 0x3e, 0xd8, 0x2f, 0x1f, 0x69, 0x41, 0x8c, 0x08, 0x55, 0x4d, 0x8b, 0x0e, 0x90, 0xb6, 0x70, 0xa1, + 0x4b, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x23, 0x37, 0xb7, 0xeb, 0x8c, 0xc9, 0xa2, 0xb6, + 0xd9, 0xb2, 0x54, 0xba, 0x27, 0x2e, 0x5b, 0xd6, 0xa9, 0x43, 0x06, 0xc9, 0x92, 0x93, 0xb8, 0xac, + 0x96, 0xe1, 0x68, 0x7a, 0xbf, 0x98, 0x8b, 0xbb, 0x31, 0xd8, 0xea, 0x16, 0xd5, 0x49, 0x2f, 0x9f, + 0xf8, 0x5b, 0x16, 0xce, 0x54, 0x4c, 0xc3, 0x21, 0x1e, 0x07, 0xe6, 0x8f, 0xa8, 0x52, 0xc7, 0xd2, + 0xd4, 0x75, 0xf6, 0x1b, 0x55, 0x20, 0x6f, 0x10, 0x9d, 0x16, 0x33, 0x8b, 0x99, 0xa5, 0x29, 0x45, + 0x7e, 0xd2, 0x11, 0xc6, 0xdc, 0x8e, 0x90, 0xbf, 0x46, 0x74, 0xfa, 0xb2, 0x23, 0x08, 0xfd, 0x8a, + 0x93, 0x02, 0x18, 0x8f, 0x04, 0x33, 0x66, 0x74, 0x1b, 0x8a, 0x0e, 0xb1, 0xea, 0xd4, 0x59, 0x69, + 0x53, 0x8b, 0xd4, 0xe9, 0x4d, 0x47, 0x6b, 0x68, 0x0f, 0x88, 0xa3, 0x99, 0x46, 0x31, 0xbb, 0x98, + 0x59, 0x1a, 0x57, 0xfe, 0xed, 0x76, 0x84, 0xe2, 0x46, 0x02, 0x0d, 0x4e, 0xe4, 0x46, 0x6d, 0x40, + 0xb1, 0xb3, 0x5b, 0xa4, 0xd1, 0xa2, 0xc5, 0xdc, 0x62, 0x66, 0xa9, 0x50, 0x96, 0xa4, 0xae, 0x83, + 0x84, 0x5a, 0x91, 0x9a, 0xdb, 0x75, 0xe6, 0x31, 0x81, 0xc9, 0xa4, 0x1b, 0x2d, 0x62, 0x38, 0x9a, + 0xb3, 0xa3, 0x9c, 0x70, 0x3b, 0x02, 0xda, 0xe8, 0x43, 0xc3, 0x03, 0x24, 0x20, 0x19, 0xa6, 0xd4, + 0x40, 0x6f, 0xc5, 0x71, 0xa6, 0x9b, 0x79, 0xae, 0x9b, 0xa9, 0xae, 0x42, 0xbb, 0x34, 0xe2, 0x1f, + 0x29, 0x9a, 0x76, 0x88, 0xd3, 0xb2, 0x0f, 0x46, 0xd3, 0x9f, 0xc0, 0x29, 0xb5, 0x65, 0x59, 0xd4, + 0x48, 0x56, 0xf5, 0x19, 0xb7, 0x23, 0x9c, 0xaa, 0x24, 0x11, 0xe1, 0x64, 0x7e, 0xf4, 0x10, 0x8e, + 0xc5, 0x0f, 0xf7, 0xa3, 0xed, 0xd3, 0xfc, 0x81, 0xc7, 0x2a, 0xfd, 0x90, 0x78, 0x90, 0x9c, 0xb8, + 0xce, 0xf3, 0x43, 0xe8, 0xfc, 0x51, 0x06, 0x4e, 0x57, 0x2c, 0xd3, 0xb6, 0x6f, 0x51, 0xcb, 0xd6, + 0x4c, 0xe3, 0xfa, 0xe6, 0x3d, 0xaa, 0x3a, 0x98, 0xde, 0xa5, 0x16, 0x35, 0x54, 0x8a, 0x16, 0x21, + 0xbf, 0xad, 0x19, 0x35, 0xae, 0xf1, 0xe9, 0x40, 0xe3, 0x1f, 0x68, 0x46, 0x0d, 0xb3, 0x13, 0x8f, + 0x82, 0xd9, 0x24, 0x1b, 0xa7, 0x88, 0x28, 0xbc, 0x0c, 0x40, 0x9a, 0x1a, 0x17, 0xc0, 0x54, 0x31, + 0xa5, 0x20, 0x4e, 0x07, 0x2b, 0x6b, 0x57, 0xf9, 0x09, 0x8e, 0x50, 0x89, 0xdf, 0xe4, 0xe0, 0xf8, + 0x7b, 0xf7, 0x1d, 0x6a, 0x19, 0xa4, 0x11, 0x0b, 0xb6, 0x32, 0x80, 0xce, 0xbe, 0xaf, 0x75, 0x1d, + 0x21, 0x04, 0xab, 0x86, 0x27, 0x38, 0x42, 0x85, 0x4c, 0x98, 0xf1, 0xbf, 0xd6, 0x69, 0x83, 0xaa, + 0x8e, 0x69, 0xb1, 0xcb, 0x16, 0xca, 0x6f, 0xa5, 0xd9, 0xc3, 0x96, 0xbc, 0xd4, 0x23, 0xb5, 0x97, + 0xa5, 0x55, 0xb2, 0x49, 0x1b, 0x01, 0xab, 0x82, 0xdc, 0x8e, 0x30, 0x53, 0x8d, 0xc1, 0xe1, 0x1e, + 0x78, 0x44, 0xa0, 0xe0, 0x07, 0xc4, 0x7e, 0xac, 0x3f, 0xeb, 0x76, 0x84, 0xc2, 0x46, 0x17, 0x06, + 0x47, 0x31, 0x13, 0xa2, 0x3a, 0xff, 0xaa, 0xa3, 0x5a, 0xfc, 0xae, 0xdf, 0x30, 0x7e, 0x6c, 0xfe, + 0x2d, 0x0c, 0xb3, 0x05, 0xd3, 0x3c, 0x6c, 0xf6, 0x63, 0x99, 0xe3, 0xfc, 0x59, 0xd3, 0x95, 0x08, + 0x16, 0x8e, 0x21, 0xa3, 0x9d, 0xc1, 0x89, 0x60, 0x34, 0x03, 0x9d, 0xdc, 0x4b, 
0x12, 0x10, 0x1f, + 0x67, 0xe1, 0xe4, 0x15, 0xd3, 0xd2, 0x1e, 0x78, 0x51, 0xde, 0x58, 0x33, 0x6b, 0x2b, 0xbc, 0xf2, + 0x53, 0x0b, 0xdd, 0x81, 0x49, 0x4f, 0x7b, 0x35, 0xe2, 0x10, 0x66, 0xa3, 0x42, 0xf9, 0xcd, 0xe1, + 0x74, 0xed, 0x27, 0x86, 0x2a, 0x75, 0x48, 0xd7, 0xaa, 0xdd, 0xff, 0x70, 0x88, 0x8a, 0x6e, 0x43, + 0xde, 0x6e, 0x52, 0x95, 0x5b, 0xf2, 0xa2, 0x94, 0xd8, 0x81, 0x48, 0x09, 0x77, 0x5c, 0x6f, 0x52, + 0xb5, 0x9b, 0x47, 0xbc, 0x2f, 0xcc, 0x10, 0xd1, 0x1d, 0x98, 0xb0, 0x99, 0xaf, 0x71, 0xb3, 0x5d, + 0x1a, 0x01, 0x9b, 0xf1, 0x2b, 0x33, 0x1c, 0x7d, 0xc2, 0xff, 0xc6, 0x1c, 0x57, 0xfc, 0x2a, 0x07, + 0x8b, 0x09, 0x9c, 0x15, 0xd3, 0xa8, 0x69, 0x2c, 0xc5, 0x5f, 0x81, 0xbc, 0xb3, 0xd3, 0x0c, 0x5c, + 0xfc, 0x42, 0x70, 0xd1, 0x8d, 0x9d, 0xa6, 0x57, 0x84, 0xce, 0xee, 0xc6, 0xef, 0xd1, 0x61, 0x86, + 0x80, 0x56, 0xc3, 0x07, 0x65, 0x63, 0x58, 0xfc, 0x5a, 0x2f, 0x3b, 0xc2, 0x80, 0xae, 0x4b, 0x0a, + 0x91, 0xe2, 0x97, 0xf7, 0x32, 0x42, 0x83, 0xd8, 0xce, 0x86, 0x45, 0x0c, 0xdb, 0x97, 0xa4, 0xe9, + 0x81, 0x87, 0xff, 0x6f, 0x38, 0x23, 0x7b, 0x1c, 0xca, 0x02, 0xbf, 0x05, 0x5a, 0xed, 0x43, 0xc3, + 0x03, 0x24, 0xa0, 0x73, 0x30, 0x61, 0x51, 0x62, 0x9b, 0x06, 0x2f, 0x38, 0xa1, 0x72, 0x31, 0xfb, + 0x17, 0xf3, 0x53, 0xf4, 0x5f, 0x38, 0xa2, 0x53, 0xdb, 0x26, 0x75, 0xca, 0xbb, 0x81, 0x59, 0x4e, + 0x78, 0xa4, 0xea, 0xff, 0x8d, 0x83, 0x73, 0xf1, 0x59, 0x06, 0x4e, 0x27, 0xe8, 0x71, 0x55, 0xb3, + 0x1d, 0xf4, 0x69, 0x9f, 0x17, 0x4b, 0x43, 0x66, 0x0c, 0xcd, 0xf6, 0x7d, 0x78, 0x8e, 0xcb, 0x9e, + 0x0c, 0xfe, 0x89, 0x78, 0xf0, 0x87, 0x30, 0xae, 0x39, 0x54, 0xf7, 0xac, 0x92, 0x5b, 0x2a, 0x94, + 0xcb, 0x7b, 0x77, 0x33, 0xe5, 0x28, 0x87, 0x1f, 0xbf, 0xea, 0x01, 0x61, 0x1f, 0x4f, 0xfc, 0x3d, + 0x9b, 0xf8, 0x2c, 0xcf, 0xcd, 0x51, 0x1b, 0x66, 0xd8, 0x97, 0x9f, 0x8a, 0x31, 0xbd, 0xcb, 0x1f, + 0x97, 0x16, 0x44, 0x29, 0xc5, 0x5b, 0x39, 0xc1, 0x6f, 0x31, 0xb3, 0x1e, 0x43, 0xc5, 0x3d, 0x52, + 0xd0, 0x32, 0x14, 0x74, 0xcd, 0xc0, 0xb4, 0xd9, 0xd0, 0x54, 0x62, 0xf3, 0x1e, 0x88, 0x95, 0x9f, + 0x6a, 0xf7, 0x6f, 0x1c, 0xa5, 0x41, 0x6f, 0x43, 0x41, 0x27, 0xf7, 0x43, 0x96, 0x1c, 0x63, 0x39, + 0xc6, 0xe5, 0x15, 0xaa, 0xdd, 0x23, 0x1c, 0xa5, 0x43, 0xf7, 0xa0, 0xe4, 0xd7, 0x94, 0xca, 0xda, + 0xcd, 0x48, 0xdb, 0xb4, 0x46, 0x2d, 0x95, 0x1a, 0x8e, 0xe7, 0x1a, 0x79, 0x86, 0x24, 0xba, 0x1d, + 0xa1, 0xb4, 0x91, 0x4a, 0x89, 0x77, 0x41, 0x12, 0x7f, 0xca, 0xc1, 0x99, 0xd4, 0x34, 0x80, 0x2e, + 0x03, 0x32, 0x37, 0x6d, 0x6a, 0xb5, 0x69, 0xed, 0x7d, 0xbf, 0xeb, 0xf7, 0x1a, 0x14, 0x4f, 0xe7, + 0x39, 0xbf, 0x26, 0x5e, 0xef, 0x3b, 0xc5, 0x03, 0x38, 0x90, 0x0a, 0x47, 0xbd, 0xb8, 0xf0, 0xb5, + 0xac, 0xf1, 0x5e, 0x68, 0x6f, 0x41, 0x37, 0xef, 0x76, 0x84, 0xa3, 0xab, 0x51, 0x10, 0x1c, 0xc7, + 0x44, 0x2b, 0x30, 0xcb, 0x93, 0x7d, 0x8f, 0xd6, 0x4f, 0x72, 0xad, 0xcf, 0x56, 0xe2, 0xc7, 0xb8, + 0x97, 0xde, 0x83, 0xa8, 0x51, 0x5b, 0xb3, 0x68, 0x2d, 0x84, 0xc8, 0xc7, 0x21, 0xde, 0x8d, 0x1f, + 0xe3, 0x5e, 0x7a, 0xa4, 0x83, 0xc0, 0x51, 0x13, 0x2d, 0x38, 0xce, 0x20, 0xff, 0xe3, 0x76, 0x04, + 0xa1, 0x92, 0x4e, 0x8a, 0x77, 0xc3, 0x12, 0x1f, 0xe5, 0x81, 0xf7, 0x0e, 0x2c, 0x40, 0x2e, 0xc4, + 0x52, 0xef, 0x62, 0x4f, 0xea, 0x9d, 0x8b, 0x36, 0x8a, 0x91, 0x34, 0x7b, 0x03, 0x26, 0x4c, 0x16, + 0x19, 0xdc, 0x2e, 0xe7, 0x53, 0xc2, 0x29, 0x2c, 0x69, 0x21, 0x90, 0x02, 0x5e, 0x2e, 0xe3, 0xa1, + 0xc5, 0x81, 0xd0, 0x55, 0xc8, 0x37, 0xcd, 0x5a, 0x50, 0x88, 0xfe, 0x9f, 0x02, 0xb8, 0x66, 0xd6, + 0xec, 0x18, 0xdc, 0xa4, 0x77, 0x63, 0xef, 0x5f, 0xcc, 0x20, 0xd0, 0x47, 0x30, 0x19, 0x14, 0x7c, + 0xde, 0x1d, 0xc8, 0x29, 0x70, 0x83, 0x06, 0x50, 0x65, 0xda, 0x4b, 0x64, 0xc1, 0x09, 0x0e, 0xe1, + 0xd0, 
0x43, 0x98, 0x57, 0x7b, 0xe7, 0xa9, 0xe2, 0x91, 0x5d, 0x6b, 0x67, 0xea, 0xb4, 0xab, 0xfc, + 0xcb, 0xed, 0x08, 0xf3, 0x7d, 0x24, 0xb8, 0x5f, 0x92, 0xf7, 0x32, 0xca, 0x3b, 0x45, 0xe6, 0x14, + 0xe9, 0x2f, 0x1b, 0xd4, 0xed, 0xfb, 0x2f, 0x0b, 0x4e, 0x70, 0x08, 0x27, 0x7e, 0x9b, 0x87, 0xe9, + 0x58, 0xf7, 0x79, 0xc8, 0x9e, 0xe1, 0xb7, 0x11, 0x07, 0xe6, 0x19, 0x3e, 0xdc, 0x81, 0x7a, 0x86, + 0x0f, 0x79, 0x48, 0x9e, 0xe1, 0x0b, 0x3b, 0x24, 0xcf, 0x88, 0xbc, 0x6c, 0x80, 0x67, 0x3c, 0xcb, + 0x01, 0xea, 0x0f, 0x62, 0xf4, 0x39, 0x4c, 0xf8, 0xe5, 0x62, 0x9f, 0x25, 0x35, 0x6c, 0x6e, 0x78, + 0xf5, 0xe4, 0xa8, 0x3d, 0xd3, 0x4f, 0x76, 0xa8, 0xe9, 0x87, 0x1e, 0xc4, 0x94, 0x18, 0xd6, 0xdc, + 0xc4, 0x49, 0xf1, 0x33, 0x98, 0xb4, 0x83, 0xf1, 0x2a, 0x3f, 0xfa, 0x78, 0xc5, 0x14, 0x1e, 0x0e, + 0x56, 0x21, 0x24, 0xaa, 0xc1, 0x34, 0x89, 0x4e, 0x38, 0xe3, 0x23, 0x3d, 0x63, 0xce, 0x1b, 0xa7, + 0x62, 0xa3, 0x4d, 0x0c, 0x55, 0xfc, 0xb9, 0xd7, 0xac, 0x7e, 0xd8, 0xff, 0x15, 0xcd, 0x7a, 0x78, + 0x33, 0xe6, 0x3f, 0xc2, 0xb2, 0xdf, 0x67, 0x61, 0xae, 0xb7, 0x48, 0x8e, 0xb4, 0x4c, 0x78, 0x30, + 0x70, 0x23, 0x92, 0x1d, 0xe9, 0xd2, 0xe1, 0x0c, 0x34, 0xe4, 0xae, 0x33, 0x6a, 0x89, 0xdc, 0x81, + 0x5b, 0x42, 0xfc, 0x21, 0xae, 0xa3, 0xd1, 0x17, 0x2e, 0x09, 0xeb, 0xc9, 0xec, 0x21, 0xad, 0x27, + 0x5f, 0xb1, 0x9a, 0x7e, 0xcc, 0xc2, 0xf1, 0xd7, 0x1b, 0xfa, 0xe1, 0x77, 0x79, 0x8f, 0xfb, 0xf5, + 0xf5, 0x7a, 0xcf, 0x3e, 0xd4, 0x8a, 0xed, 0xcb, 0x2c, 0x8c, 0xb3, 0xd1, 0xec, 0x10, 0x16, 0x6a, + 0x97, 0x63, 0x0b, 0xb5, 0xb3, 0x29, 0x15, 0x8e, 0xdd, 0x28, 0x71, 0x7d, 0x76, 0xad, 0x67, 0x7d, + 0x76, 0x6e, 0x57, 0xa4, 0xf4, 0x65, 0xd9, 0x3b, 0x30, 0x15, 0x0a, 0x44, 0x6f, 0x78, 0xbd, 0x2a, + 0x9f, 0x29, 0x33, 0xcc, 0xb6, 0xe1, 0x86, 0x25, 0x1c, 0x26, 0x43, 0x0a, 0x51, 0x83, 0x42, 0x44, + 0xc2, 0xde, 0x98, 0x3d, 0x6a, 0x3b, 0xba, 0x2e, 0x9e, 0xea, 0x52, 0xf7, 0xe7, 0x04, 0x65, 0xe9, + 0xc9, 0x8b, 0xd2, 0xd8, 0xd3, 0x17, 0xa5, 0xb1, 0xe7, 0x2f, 0x4a, 0x63, 0x5f, 0xb8, 0xa5, 0xcc, + 0x13, 0xb7, 0x94, 0x79, 0xea, 0x96, 0x32, 0xcf, 0xdd, 0x52, 0xe6, 0x17, 0xb7, 0x94, 0xf9, 0xfa, + 0xd7, 0xd2, 0xd8, 0xc7, 0xd9, 0xf6, 0xf2, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x83, 0x99, + 0xe1, 0x70, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1138,6 +1300,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1214,6 +1388,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1723,6 +1909,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1896,6 +2120,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1923,6 +2151,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ 
-2083,6 +2315,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2200,6 +2458,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2214,6 +2473,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2335,7 +2595,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2358,15 +2618,15 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) 
} var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2394,13 +2654,13 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2410,18 +2670,361 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } 
+ if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF @@ -3870,6 +4473,42 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4099,6 +4738,42 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto 
b/vendor/k8s.io/api/autoscaling/v1/generated.proto index f50ed9d1f..08b8667c8 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target. + optional string container = 5; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling taget + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -184,8 +238,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -207,6 +263,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -218,8 +283,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -241,6 +308,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 55b2a0d6b..3343177b2 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -165,6 +165,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). 
ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -176,8 +182,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -196,6 +204,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -267,6 +283,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
+ // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. + Container string `json:"container" protobuf:"bytes,5,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -289,8 +329,10 @@ type ExternalMetricSource struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -309,6 +351,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -414,6 +463,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 129ce2b48..192dc5f39 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in the requests and limits, describing a single container in each of the pods of the current scale target(e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built into Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target.", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling taget", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -122,12 +146,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -135,12 +160,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index ddb601128..05ae6ebda 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -252,6 +300,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -288,6 +341,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index e129e41b8..750808f8d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) 
Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} + return fileDescriptor_26c1bfc7a52d0478, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} + return fileDescriptor_26c1bfc7a52d0478, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} + return fileDescriptor_26c1bfc7a52d0478, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{9} } func (m 
*HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} + return fileDescriptor_26c1bfc7a52d0478, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} + return fileDescriptor_26c1bfc7a52d0478, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} + return fileDescriptor_26c1bfc7a52d0478, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} + return fileDescriptor_26c1bfc7a52d0478, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} + return fileDescriptor_26c1bfc7a52d0478, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{13} + return fileDescriptor_26c1bfc7a52d0478, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} + return fileDescriptor_26c1bfc7a52d0478, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} + return fileDescriptor_26c1bfc7a52d0478, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -496,6 +552,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") @@ -519,100 +577,203 @@ func init() { } var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 
0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 
0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, + // 1562 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6c, 0x1b, 0xc5, + 0x1b, 0x8f, 0xed, 0xcd, 0xeb, 0x73, 0x9a, 0xc7, 0xb4, 0xff, 0xd6, 0x4d, 0xff, 0xb5, 0xa3, 0x15, + 0x42, 0xa1, 0xa2, 0xbb, 0xad, 0x09, 0x0f, 0x09, 0x21, 0x11, 0x1b, 0x68, 0x2b, 0x92, 0xb6, 0x4c, + 0xd2, 0x0a, 0x41, 0x8b, 0x98, 0xac, 0xa7, 0xce, 0x12, 0x7b, 0xd7, 0xda, 0x19, 0x5b, 0x4d, 0x11, + 0x12, 0x42, 0xe2, 0xce, 0x05, 0xce, 0x20, 0x71, 0x45, 0x88, 0x0b, 0x9c, 0xb9, 0xf5, 0xd8, 0x63, + 0x2b, 0x90, 0x45, 0xcd, 0x81, 0x33, 0xd7, 0x9e, 0xd0, 0xcc, 0xce, 0xae, 0x77, 0xfd, 0x88, 0x1d, + 0x37, 0x0d, 0x0f, 0xf5, 0xe6, 0xdd, 0xf9, 0xbe, 0xdf, 0x37, 0xf3, 0xfb, 0x5e, 0xf3, 0xad, 0xe1, + 0xc2, 
0xce, 0x2b, 0xcc, 0xb0, 0x5d, 0x73, 0xa7, 0xbe, 0x45, 0x3d, 0x87, 0x72, 0xca, 0xcc, 0x06, + 0x75, 0x4a, 0xae, 0x67, 0xaa, 0x05, 0x52, 0xb3, 0x4d, 0x52, 0xe7, 0x2e, 0xb3, 0x48, 0xc5, 0x76, + 0xca, 0x66, 0x23, 0xbf, 0x45, 0x39, 0x39, 0x6f, 0x96, 0xa9, 0x43, 0x3d, 0xc2, 0x69, 0xc9, 0xa8, + 0x79, 0x2e, 0x77, 0x51, 0xd6, 0x97, 0x37, 0x48, 0xcd, 0x36, 0x22, 0xf2, 0x86, 0x92, 0x5f, 0x3c, + 0x5b, 0xb6, 0xf9, 0x76, 0x7d, 0xcb, 0xb0, 0xdc, 0xaa, 0x59, 0x76, 0xcb, 0xae, 0x29, 0xd5, 0xb6, + 0xea, 0xb7, 0xe4, 0x93, 0x7c, 0x90, 0xbf, 0x7c, 0xb8, 0x45, 0x3d, 0x62, 0xde, 0x72, 0x3d, 0x6a, + 0x36, 0xba, 0x4c, 0x2e, 0xae, 0xb4, 0x65, 0xaa, 0xc4, 0xda, 0xb6, 0x1d, 0xea, 0xed, 0x9a, 0xb5, + 0x9d, 0xb2, 0x54, 0xf2, 0x28, 0x73, 0xeb, 0x9e, 0x45, 0xf7, 0xa5, 0xc5, 0xcc, 0x2a, 0xe5, 0xa4, + 0x97, 0x2d, 0xb3, 0x9f, 0x96, 0x57, 0x77, 0xb8, 0x5d, 0xed, 0x36, 0xf3, 0xd2, 0x20, 0x05, 0x66, + 0x6d, 0xd3, 0x2a, 0xe9, 0xd4, 0xd3, 0xff, 0x48, 0xc2, 0xe9, 0xa2, 0xeb, 0x70, 0x22, 0x34, 0xb0, + 0x3a, 0xc4, 0x3a, 0xe5, 0x9e, 0x6d, 0x6d, 0xc8, 0xdf, 0xa8, 0x08, 0x9a, 0x43, 0xaa, 0x34, 0x93, + 0x58, 0x4a, 0x2c, 0x4f, 0x17, 0xcc, 0xbb, 0xcd, 0xdc, 0x58, 0xab, 0x99, 0xd3, 0x2e, 0x93, 0x2a, + 0x7d, 0xd4, 0xcc, 0xe5, 0xba, 0x89, 0x33, 0x02, 0x18, 0x21, 0x82, 0xa5, 0x32, 0x7a, 0x17, 0x32, + 0x9c, 0x78, 0x65, 0xca, 0x57, 0x1b, 0xd4, 0x23, 0x65, 0x7a, 0x8d, 0xdb, 0x15, 0xfb, 0x0e, 0xe1, + 0xb6, 0xeb, 0x64, 0x92, 0x4b, 0x89, 0xe5, 0xf1, 0xc2, 0xff, 0x5b, 0xcd, 0x5c, 0x66, 0xb3, 0x8f, + 0x0c, 0xee, 0xab, 0x8d, 0x1a, 0x80, 0x62, 0x6b, 0xd7, 0x49, 0xa5, 0x4e, 0x33, 0xa9, 0xa5, 0xc4, + 0x72, 0x3a, 0x6f, 0x18, 0xed, 0x28, 0x09, 0x59, 0x31, 0x6a, 0x3b, 0x65, 0x19, 0x36, 0x81, 0xcb, + 0x8c, 0x77, 0xea, 0xc4, 0xe1, 0x36, 0xdf, 0x2d, 0x1c, 0x6f, 0x35, 0x73, 0x68, 0xb3, 0x0b, 0x0d, + 0xf7, 0xb0, 0x80, 0x4c, 0x98, 0xb6, 0x02, 0xde, 0x32, 0x9a, 0xe4, 0x66, 0x41, 0x71, 0x33, 0xdd, + 0x26, 0xb4, 0x2d, 0xa3, 0xff, 0xb9, 0x07, 0xd3, 0x9c, 0xf0, 0x3a, 0x3b, 0x18, 0xa6, 0xdf, 0x87, + 0x93, 0x56, 0xdd, 0xf3, 0xa8, 0xd3, 0x9f, 0xea, 0xd3, 0xad, 0x66, 0xee, 0x64, 0xb1, 0x9f, 0x10, + 0xee, 0xaf, 0x8f, 0x3e, 0x81, 0xa3, 0xf1, 0xc5, 0xc7, 0x61, 0xfb, 0x94, 0x3a, 0xe0, 0xd1, 0x62, + 0x37, 0x24, 0xee, 0x65, 0x67, 0xff, 0x9c, 0x7f, 0x99, 0x80, 0x53, 0x45, 0xcf, 0x65, 0xec, 0x3a, + 0xf5, 0x98, 0xed, 0x3a, 0x57, 0xb6, 0x3e, 0xa2, 0x16, 0xc7, 0xf4, 0x16, 0xf5, 0xa8, 0x63, 0x51, + 0xb4, 0x04, 0xda, 0x8e, 0xed, 0x94, 0x14, 0xe3, 0x33, 0x01, 0xe3, 0x6f, 0xdb, 0x4e, 0x09, 0xcb, + 0x15, 0x21, 0x21, 0x7d, 0x92, 0x8c, 0x4b, 0x44, 0x08, 0xcf, 0x03, 0x90, 0x9a, 0xad, 0x0c, 0x48, + 0x2a, 0xa6, 0x0b, 0x48, 0xc9, 0xc1, 0xea, 0xd5, 0x4b, 0x6a, 0x05, 0x47, 0xa4, 0xf4, 0xaf, 0x52, + 0x70, 0xec, 0xcd, 0xdb, 0x9c, 0x7a, 0x0e, 0xa9, 0xc4, 0x92, 0x2d, 0x0f, 0x50, 0x95, 0xcf, 0x97, + 0xdb, 0x81, 0x10, 0x82, 0xad, 0x87, 0x2b, 0x38, 0x22, 0x85, 0x5c, 0x98, 0xf5, 0x9f, 0x36, 0x68, + 0x85, 0x5a, 0xdc, 0xf5, 0xe4, 0x66, 0xd3, 0xf9, 0x17, 0xf6, 0xf2, 0x07, 0x33, 0x44, 0xe9, 0x31, + 0x1a, 0xe7, 0x8d, 0x35, 0xb2, 0x45, 0x2b, 0x81, 0x6a, 0x01, 0xb5, 0x9a, 0xb9, 0xd9, 0xf5, 0x18, + 0x1c, 0xee, 0x80, 0x47, 0x04, 0xd2, 0x7e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0x5a, 0xcd, 0x5c, 0x7a, + 0xb3, 0x0d, 0x83, 0xa3, 0x98, 0x7d, 0xb2, 0x5a, 0x7b, 0xd2, 0x59, 0xad, 0x7f, 0xdd, 0xed, 0x18, + 0x3f, 0x37, 0xff, 0x15, 0x8e, 0xd9, 0x86, 0x19, 0x95, 0x36, 0x8f, 0xe3, 0x99, 0x63, 0xea, 0x58, + 0x33, 0xc5, 0x08, 0x16, 0x8e, 0x21, 0xa3, 0xdd, 0xde, 0x85, 0x60, 0x34, 0x07, 0x9d, 0xd8, 0x4f, + 0x11, 0xd0, 0x7f, 0x4e, 0xc2, 0x89, 0x8b, 0xae, 0x67, 0xdf, 0x11, 0x59, 0x5e, 0xb9, 0xea, 0x96, + 0x56, 0x55, 0xfb, 0xa7, 0x1e, 
0xfa, 0x10, 0xa6, 0x04, 0x7b, 0x25, 0xc2, 0x89, 0xf4, 0x51, 0x3a, + 0x7f, 0x6e, 0x38, 0xae, 0xfd, 0xc2, 0xb0, 0x4e, 0x39, 0x69, 0x7b, 0xb5, 0xfd, 0x0e, 0x87, 0xa8, + 0xe8, 0x26, 0x68, 0xac, 0x46, 0x2d, 0xe5, 0xc9, 0x57, 0x8d, 0xbd, 0xaf, 0x21, 0x46, 0x9f, 0x8d, + 0x6e, 0xd4, 0xa8, 0xd5, 0x2e, 0x26, 0xe2, 0x09, 0x4b, 0x58, 0x44, 0x61, 0x82, 0xc9, 0x80, 0x53, + 0xbe, 0x7b, 0x6d, 0x54, 0x03, 0x12, 0xa4, 0x30, 0xab, 0x4c, 0x4c, 0xf8, 0xcf, 0x58, 0x81, 0xeb, + 0x9f, 0xa7, 0x60, 0xa9, 0x8f, 0x66, 0xd1, 0x75, 0x4a, 0xb6, 0x2c, 0xf6, 0x17, 0x41, 0xe3, 0xbb, + 0xb5, 0x20, 0xd8, 0x57, 0x82, 0xdd, 0x6e, 0xee, 0xd6, 0x44, 0x3b, 0x7a, 0x66, 0x90, 0xbe, 0x90, + 0xc3, 0x12, 0x01, 0xad, 0x85, 0xa7, 0x4a, 0xc6, 0xb0, 0xd4, 0xb6, 0x1e, 0x35, 0x73, 0x3d, 0xee, + 0x5f, 0x46, 0x88, 0x14, 0xdf, 0xbc, 0xa8, 0x0d, 0x15, 0xc2, 0xf8, 0xa6, 0x47, 0x1c, 0xe6, 0x5b, + 0xb2, 0xab, 0x41, 0xac, 0x9f, 0x19, 0xce, 0xdd, 0x42, 0xa3, 0xb0, 0xa8, 0x76, 0x81, 0xd6, 0xba, + 0xd0, 0x70, 0x0f, 0x0b, 0xe8, 0x59, 0x98, 0xf0, 0x28, 0x61, 0xae, 0xa3, 0x5a, 0x4f, 0x48, 0x2e, + 0x96, 0x6f, 0xb1, 0x5a, 0x45, 0xcf, 0xc1, 0x64, 0x95, 0x32, 0x46, 0xca, 0x34, 0x33, 0x2e, 0x05, + 0xe7, 0x94, 0xe0, 0xe4, 0xba, 0xff, 0x1a, 0x07, 0xeb, 0xfa, 0x83, 0x04, 0x9c, 0xea, 0xc3, 0xe3, + 0x9a, 0xcd, 0x38, 0xba, 0xd1, 0x15, 0xcf, 0xc6, 0x90, 0xb5, 0xc3, 0x66, 0x7e, 0x34, 0xcf, 0x2b, + 0xdb, 0x53, 0xc1, 0x9b, 0x48, 0x2c, 0xdf, 0x80, 0x71, 0x9b, 0xd3, 0xaa, 0xf0, 0x4a, 0x6a, 0x39, + 0x9d, 0x7f, 0x79, 0xc4, 0x58, 0x2b, 0x1c, 0x51, 0x36, 0xc6, 0x2f, 0x09, 0x34, 0xec, 0x83, 0xea, + 0xbf, 0x24, 0xfb, 0x9e, 0x4d, 0x04, 0x3c, 0xfa, 0x18, 0x66, 0xe5, 0x93, 0x5f, 0x99, 0x31, 0xbd, + 0xa5, 0x4e, 0x38, 0x30, 0xa7, 0xf6, 0x68, 0xe8, 0x85, 0xe3, 0x6a, 0x2b, 0xb3, 0x1b, 0x31, 0x68, + 0xdc, 0x61, 0x0a, 0x9d, 0x87, 0x74, 0xd5, 0x76, 0x30, 0xad, 0x55, 0x6c, 0x8b, 0x30, 0x75, 0x2f, + 0x92, 0x2d, 0x69, 0xbd, 0xfd, 0x1a, 0x47, 0x65, 0xd0, 0x8b, 0x90, 0xae, 0x92, 0xdb, 0xa1, 0x4a, + 0x4a, 0xaa, 0x1c, 0x55, 0xf6, 0xd2, 0xeb, 0xed, 0x25, 0x1c, 0x95, 0x43, 0xd7, 0x44, 0x34, 0x88, + 0x2a, 0xcd, 0x32, 0x9a, 0xa4, 0xf9, 0xcc, 0xa0, 0xf3, 0xa9, 0x22, 0x2f, 0x4a, 0x44, 0x24, 0x72, + 0x24, 0x04, 0x0e, 0xb0, 0xf4, 0x1f, 0x35, 0x38, 0xbd, 0x67, 0xee, 0xa3, 0xb7, 0x00, 0xb9, 0x5b, + 0x8c, 0x7a, 0x0d, 0x5a, 0xba, 0xe0, 0x5f, 0xfa, 0xc5, 0xfd, 0x44, 0x70, 0x9c, 0xf2, 0x5b, 0xe2, + 0x95, 0xae, 0x55, 0xdc, 0x43, 0x03, 0x59, 0x70, 0x44, 0x24, 0x83, 0x4f, 0xa8, 0xad, 0xae, 0x42, + 0xfb, 0xcb, 0xb4, 0x85, 0x56, 0x33, 0x77, 0x64, 0x2d, 0x0a, 0x82, 0xe3, 0x98, 0x68, 0x15, 0xe6, + 0x54, 0xad, 0xef, 0x20, 0xf8, 0x84, 0x62, 0x60, 0xae, 0x18, 0x5f, 0xc6, 0x9d, 0xf2, 0x02, 0xa2, + 0x44, 0x99, 0xed, 0xd1, 0x52, 0x08, 0xa1, 0xc5, 0x21, 0xde, 0x88, 0x2f, 0xe3, 0x4e, 0x79, 0x54, + 0x81, 0x59, 0x85, 0xaa, 0xf8, 0xce, 0x8c, 0x4b, 0x97, 0x3d, 0x3f, 0xa4, 0xcb, 0xfc, 0xa2, 0x1b, + 0xc6, 0x60, 0x31, 0x86, 0x85, 0x3b, 0xb0, 0x11, 0x07, 0xb0, 0x82, 0x12, 0xc7, 0x32, 0x13, 0xd2, + 0xd2, 0xeb, 0x23, 0xe6, 0x60, 0x58, 0x2b, 0xdb, 0xed, 0x2b, 0x7c, 0xc5, 0x70, 0xc4, 0x8e, 0xfe, + 0xbd, 0x06, 0xd0, 0x8e, 0x30, 0xb4, 0x12, 0x2b, 0xf2, 0x4b, 0x1d, 0x45, 0x7e, 0x3e, 0x7a, 0x39, + 0x8d, 0x14, 0xf4, 0xeb, 0x30, 0xe1, 0xca, 0xcc, 0x53, 0xc1, 0x90, 0x1f, 0xb4, 0xed, 0xb0, 0x97, + 0x86, 0x68, 0x05, 0x10, 0xa5, 0x53, 0xe5, 0xaf, 0x42, 0x43, 0x97, 0x41, 0xab, 0xb9, 0xa5, 0xa0, + 0xf9, 0x9d, 0x1b, 0x84, 0x7a, 0xd5, 0x2d, 0xb1, 0x18, 0xe6, 0x94, 0xd8, 0xbb, 0x78, 0x8b, 0x25, + 0x0e, 0xfa, 0x00, 0xa6, 0x82, 0xeb, 0x86, 0xba, 0x9b, 0xac, 0x0c, 0xc2, 0xec, 0x35, 0x03, 0x17, + 0x66, 0x44, 0x05, 0x0d, 0x56, 0x70, 0x88, 0x89, 0x3e, 
0x4b, 0xc0, 0x82, 0xd5, 0x39, 0xd3, 0x65, + 0x26, 0x87, 0x6b, 0xdd, 0x7b, 0x8e, 0xdd, 0x85, 0xff, 0xb5, 0x9a, 0xb9, 0x85, 0x2e, 0x11, 0xdc, + 0x6d, 0x4e, 0x1c, 0x92, 0xaa, 0x2b, 0xab, 0x6c, 0x38, 0x43, 0x1c, 0xb2, 0xd7, 0xec, 0xe1, 0x1f, + 0x32, 0x58, 0xc1, 0x21, 0xa6, 0xfe, 0x83, 0x06, 0x33, 0xb1, 0xbb, 0xf0, 0xdf, 0x11, 0x33, 0x7e, + 0x6a, 0x1d, 0x6c, 0xcc, 0xf8, 0x98, 0x07, 0x1f, 0x33, 0x3e, 0xee, 0xa1, 0xc6, 0x8c, 0x6f, 0xf2, + 0x30, 0x63, 0x26, 0x72, 0xc8, 0x1e, 0x31, 0xf3, 0x20, 0x05, 0xa8, 0x3b, 0xe7, 0x91, 0x05, 0x13, + 0xfe, 0xd0, 0x75, 0x10, 0xbd, 0x3e, 0xbc, 0x7f, 0xa9, 0xb6, 0xae, 0xa0, 0x3b, 0x46, 0xb5, 0xe4, + 0x50, 0xa3, 0x1a, 0x3d, 0x88, 0x91, 0x36, 0xbc, 0x0c, 0xf4, 0x1d, 0x6b, 0x6f, 0xc2, 0x14, 0x0b, + 0x66, 0x41, 0x6d, 0xf4, 0x59, 0x50, 0xb2, 0x1e, 0x4e, 0x81, 0x21, 0x24, 0x2a, 0xc1, 0x0c, 0x89, + 0x8e, 0x63, 0xe3, 0x23, 0x1d, 0x63, 0x5e, 0xcc, 0x7e, 0xb1, 0x39, 0x2c, 0x86, 0xaa, 0xff, 0xda, + 0xe9, 0x5b, 0xbf, 0x2a, 0xfc, 0x63, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0x7f, 0xc2, 0xbd, 0xdf, 0x24, + 0x61, 0xbe, 0xb3, 0xb1, 0x8e, 0xf4, 0xf9, 0xe3, 0x4e, 0xcf, 0x6f, 0x38, 0xc9, 0x91, 0x36, 0x1d, + 0xce, 0x6a, 0x43, 0x7e, 0x9d, 0x8d, 0x7a, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x8d, 0x73, 0x34, + 0xfa, 0x27, 0xa2, 0x3e, 0x1f, 0x54, 0x93, 0x87, 0xf4, 0x41, 0xf5, 0x09, 0xd3, 0xf4, 0x5d, 0x12, + 0x8e, 0x3d, 0xfd, 0x4f, 0x61, 0xf8, 0xaf, 0x8f, 0x3f, 0x75, 0xf3, 0xf5, 0xf4, 0x9f, 0x81, 0x61, + 0x02, 0xb9, 0x70, 0xf6, 0xee, 0xc3, 0xec, 0xd8, 0xbd, 0x87, 0xd9, 0xb1, 0xfb, 0x0f, 0xb3, 0x63, + 0x9f, 0xb6, 0xb2, 0x89, 0xbb, 0xad, 0x6c, 0xe2, 0x5e, 0x2b, 0x9b, 0xb8, 0xdf, 0xca, 0x26, 0x7e, + 0x6b, 0x65, 0x13, 0x5f, 0xfc, 0x9e, 0x1d, 0x7b, 0x6f, 0x52, 0xb5, 0x9e, 0xbf, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x4d, 0x4c, 0xa8, 0x42, 0x87, 0x1c, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) 
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1081,6 +1242,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1157,6 +1330,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1556,6 +1741,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1741,6 +1964,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1768,6 +1995,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1891,6 +2122,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + 
valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2024,6 +2281,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2038,6 +2296,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2126,7 +2385,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2149,15 +2408,15 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2185,13 +2444,13 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2201,29 +2460,372 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.TargetAverageUtilization = &v case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) } - var stringLen uint64 + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3723,6 +4325,42 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3952,6 +4590,42 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index f90f93f9f..5ad55fa72 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
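For orientation, the wire-format bookkeeping in the generated marshal/size/unmarshal code earlier in this patch follows directly from the field numbers declared in these proto messages. The new containerResource field is number 7 with a length-delimited (embedded message) payload, so its key byte is 7<<3|2 = 0x3a, which is exactly the tag written by the new MetricSpec/MetricStatus marshal branches, and the "n += 1 + l + sovGenerated(uint64(l))" pattern in Size() is one key byte plus the varint-encoded length plus the payload itself. The sketch below is illustrative only (it is not part of the patch); fieldKey and varintLen are hypothetical helper names and assume nothing beyond the standard protobuf varint rules.

package main

import "fmt"

// fieldKey returns the protobuf key value for a field number and wire type:
// key = fieldNum<<3 | wireType (wire type 2 = length-delimited payload).
func fieldKey(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

// varintLen mirrors what sovGenerated computes in the generated code:
// the number of bytes needed to encode x as a protobuf varint.
func varintLen(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	// The new containerResource field is number 7, embedded message (wire type 2).
	fmt.Printf("key for field 7, wire type 2: 0x%x\n", fieldKey(7, 2)) // 0x3a
	// A length-delimited field costs 1 key byte + varint(len) + len payload bytes,
	// which is the "n += 1 + l + sovGenerated(uint64(l))" pattern in Size().
	fmt.Println("varint length of 300:", varintLen(300)) // 2
}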
message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -200,8 +254,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -223,6 +279,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -234,8 +299,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -257,6 +324,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index d76e87967..05023d9bc 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -76,6 +76,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -87,8 +93,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -107,6 +115,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -178,6 +194,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -265,8 +305,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -285,6 +327,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -354,6 +403,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index a0d5f5337..08a54621f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v2beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -123,12 +147,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -136,12 +161,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index c51e05b8f..f10c7884d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
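For context on how these additions are consumed, the short sketch below (illustrative only, not part of the patch) builds a MetricSpec that targets the CPU utilization of a single named container and exercises the generated DeepCopy from this file. It assumes the vendored k8s.io/api packages from this bump; the container name "app" and the int32Ptr helper are hypothetical.

package main

import (
	"fmt"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	// Scale on the CPU utilization of one named container in each pod,
	// rather than on the pod-wide "Resource" source.
	spec := autoscalingv2beta1.MetricSpec{
		Type: autoscalingv2beta1.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2beta1.ContainerResourceMetricSource{
			Name:                     v1.ResourceCPU,
			Container:                "app", // hypothetical container name
			TargetAverageUtilization: int32Ptr(60),
		},
	}

	// The generated DeepCopy must not alias the pointer fields.
	cp := spec.DeepCopy()
	*cp.ContainerResource.TargetAverageUtilization = 80
	fmt.Println(*spec.ContainerResource.TargetAverageUtilization) // still 60
	fmt.Println(cp.ContainerResource.Name, cp.ContainerResource.Container)
}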
+func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -263,6 +311,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -299,6 +352,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index c69d6cb9e..43e06f9eb 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. 
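Most of the v2beta2 churn that follows only renumbers descriptor indices: the generated Descriptor() helpers return the message's position within the file descriptor, which tracks declaration order in the .proto file, so declaring ContainerResourceMetricSource and ContainerResourceMetricStatus ahead of the existing messages shifts every other index up by two (CrossVersionObjectReference moves from []int{0} to []int{2}, and so on). A minimal sketch of that bookkeeping, assuming nothing beyond declaration-order indexing:

package main

import "fmt"

// indexOf maps each message name to its position in declaration order,
// which is what the []int returned by the generated Descriptor() methods encodes.
func indexOf(names []string) map[string]int {
	idx := make(map[string]int, len(names))
	for i, n := range names {
		idx[n] = i
	}
	return idx
}

func main() {
	before := []string{"CrossVersionObjectReference", "ExternalMetricSource", "ExternalMetricStatus"}
	// Inserting the two new messages at the front shifts every existing index by two.
	after := append([]string{"ContainerResourceMetricSource", "ContainerResourceMetricStatus"}, before...)

	fmt.Println("before:", indexOf(before)["CrossVersionObjectReference"]) // 0
	fmt.Println("after: ", indexOf(after)["CrossVersionObjectReference"])  // 2
}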
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} + return fileDescriptor_592ad94d7d6be24f, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} + return fileDescriptor_592ad94d7d6be24f, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) 
Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} + return fileDescriptor_592ad94d7d6be24f, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_592ad94d7d6be24f, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_592ad94d7d6be24f, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - 
return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_592ad94d7d6be24f, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_592ad94d7d6be24f, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_592ad94d7d6be24f, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_592ad94d7d6be24f, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_592ad94d7d6be24f, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_592ad94d7d6be24f, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_592ad94d7d6be24f, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_592ad94d7d6be24f, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_592ad94d7d6be24f, []int{20} } func (m 
*PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_592ad94d7d6be24f, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_592ad94d7d6be24f, []int{22} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_592ad94d7d6be24f, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,6 +720,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") @@ -693,111 +751,202 @@ func init() { } var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1657 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, - 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, - 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, - 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, - 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, - 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, - 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, - 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, - 0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, - 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, - 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, - 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, - 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, - 
0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, - 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, - 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, - 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, - 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, - 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, - 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, - 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, - 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, - 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, - 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, - 0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, - 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, - 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, - 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, - 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, - 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, - 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, - 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, - 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, - 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2, - 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, - 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, - 0xdd, 0xca, 0xcd, 0x96, 0xf6, 0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, - 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, - 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, - 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, - 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, - 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, - 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, - 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, - 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, - 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, - 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, - 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, - 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, - 0xe7, 0xae, 0xad, 0xcc, 
0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, - 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, - 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, - 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, - 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, - 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, - 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, - 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, - 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, - 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, - 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, - 0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, - 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, - 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, - 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, - 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, - 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, - 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, - 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, - 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, - 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, - 0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, - 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, - 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, - 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, - 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, - 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, - 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, - 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, - 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, - 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, - 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, - 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, - 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, - 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, - 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, - 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 
0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, - 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, - 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, - 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, - 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, - 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, - 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, - 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, - 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, - 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, - 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, - 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, - 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, - 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, - 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, - 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, - 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, - 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, + // 1741 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae, + 0x51, 0x2f, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0x5b, 0xdb, 0xb0, 0x64, 0xab, 0x43, + 0x59, 0x2d, 0x0a, 0xd9, 0xe8, 0x70, 0x77, 0x44, 0x4d, 0x45, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, + 0x81, 0xa2, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 0x09, 0x90, 0x63, 0x3e, + 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6, + 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3, + 0x7b, 0x6f, 0xde, 0x0c, 0xc1, 0xad, 0x9d, 0xeb, 0xae, 0x46, 0xed, 0xe2, 0x4e, 0xb3, 0x42, 0x1c, + 0x8b, 0x78, 0xc4, 0x2d, 0xb6, 0x88, 0x65, 0xda, 0x4e, 0x51, 0x4e, 0xe0, 0x06, 0x2d, 0xe2, 0xa6, + 0x67, 0xbb, 0x06, 0xae, 0x51, 0xab, 0x5a, 0x6c, 0x95, 0x2a, 0xc4, 0xc3, 0xa5, 0x62, 0x95, 0x58, + 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0x86, 0x63, 0x7b, 0x36, 0xcc, 0x0b, 0x79, 0x0d, 0x37, 0xa8, 0x16, + 0x91, 0xd7, 0xa4, 0xfc, 0xdc, 0xb5, 0x2a, 0xf5, 0xb6, 0x9b, 0x15, 0xcd, 0xb0, 0xeb, 0xc5, 0xaa, + 0x5d, 0xb5, 0x8b, 0x5c, 0xad, 0xd2, 0xdc, 0xe2, 0x5f, 0xfc, 0x83, 0xff, 0x12, 0x70, 0x73, 0x6a, + 0xc4, 0xbc, 0x61, 0x3b, 0xa4, 0xd8, 0x5a, 0x48, 0x9a, 0x9c, 0x5b, 0x0c, 0x65, 0xea, 0xd8, 0xd8, + 0xa6, 0x16, 0x71, 0x76, 0x8b, 0x8d, 0x9d, 0x2a, 0x57, 0x72, 0x88, 0x6b, 0x37, 0x1d, 0x83, 0x1c, + 0x48, 0xcb, 0x2d, 0xd6, 0x89, 0x87, 0x7b, 0xd9, 0x2a, 0xee, 0xa5, 0xe5, 0x34, 0x2d, 0x8f, 0xd6, + 0xbb, 0xcd, 0xfc, 0xa1, 0x9f, 0x82, 0x6b, 0x6c, 0x93, 0x3a, 0x4e, 0xea, 0xa9, 0x3f, 0x28, 0xe0, + 0xd2, 0xb2, 0x6d, 0x79, 0x98, 0x69, 0x20, 0xe9, 0xc4, 0x2a, 0xf1, 
0x1c, 0x6a, 0x94, 0xf9, 0x6f, + 0xb8, 0x0c, 0x32, 0x16, 0xae, 0x93, 0x9c, 0x32, 0xaf, 0x5c, 0x19, 0xd7, 0x8b, 0x2f, 0xda, 0x85, + 0xa1, 0x4e, 0xbb, 0x90, 0xb9, 0x87, 0xeb, 0xe4, 0x4d, 0xbb, 0x50, 0xe8, 0x0e, 0x9c, 0xe6, 0xc3, + 0x30, 0x11, 0xc4, 0x95, 0xe1, 0x3a, 0x18, 0xf1, 0xb0, 0x53, 0x25, 0x5e, 0x2e, 0x35, 0xaf, 0x5c, + 0xc9, 0x96, 0x7e, 0xa3, 0xed, 0xbf, 0x7f, 0x9a, 0x58, 0xc2, 0x3a, 0xd7, 0xd1, 0xa7, 0xa4, 0xd1, + 0x11, 0xf1, 0x8d, 0x24, 0x16, 0x2c, 0x82, 0x71, 0xc3, 0x5f, 0x7b, 0x2e, 0xcd, 0xd7, 0x37, 0x2b, + 0x45, 0xc7, 0x43, 0xa7, 0x42, 0x19, 0xf5, 0xc7, 0x7d, 0xbc, 0xf5, 0xb0, 0xd7, 0x74, 0x8f, 0xc6, + 0xdb, 0x4d, 0x30, 0x6a, 0x34, 0x1d, 0x87, 0x58, 0xbe, 0xbb, 0x0b, 0x83, 0xb9, 0xbb, 0x81, 0x6b, + 0x4d, 0x22, 0x16, 0xa2, 0x4f, 0x4b, 0xd3, 0xa3, 0xcb, 0x02, 0x09, 0xf9, 0x90, 0x07, 0xf7, 0xfa, + 0x7d, 0x05, 0x5c, 0x5c, 0x76, 0x6c, 0xd7, 0xdd, 0x20, 0x8e, 0x4b, 0x6d, 0xeb, 0x7e, 0xe5, 0x3f, + 0xc4, 0xf0, 0x10, 0xd9, 0x22, 0x0e, 0xb1, 0x0c, 0x02, 0xe7, 0x41, 0x66, 0x87, 0x5a, 0xa6, 0xf4, + 0x79, 0xc2, 0xf7, 0xf9, 0x2e, 0xb5, 0x4c, 0xc4, 0x67, 0x98, 0x04, 0x8f, 0x4a, 0x2a, 0x2e, 0x11, + 0x71, 0xb9, 0x04, 0x00, 0x6e, 0x50, 0x69, 0x40, 0xae, 0x0a, 0x4a, 0x39, 0xb0, 0xb4, 0x76, 0x47, + 0xce, 0xa0, 0x88, 0x94, 0xfa, 0x5c, 0x01, 0x67, 0xff, 0xfa, 0xc4, 0x23, 0x8e, 0x85, 0x6b, 0x31, + 0xca, 0xfd, 0x13, 0x8c, 0xd4, 0xf9, 0x37, 0x5f, 0x52, 0xb6, 0xf4, 0xdb, 0xc1, 0xc2, 0x77, 0xc7, + 0x24, 0x96, 0x47, 0xb7, 0x28, 0x71, 0x42, 0xc6, 0x88, 0x19, 0x24, 0xf1, 0x8e, 0x87, 0x87, 0xea, + 0xd7, 0xdd, 0x8e, 0x08, 0x36, 0x1d, 0x9f, 0x23, 0xc7, 0x4a, 0x31, 0xf5, 0x43, 0x05, 0xcc, 0xdc, + 0x5e, 0x5b, 0x2a, 0x0b, 0x88, 0x35, 0xbb, 0x46, 0x8d, 0x5d, 0x78, 0x1d, 0x64, 0xbc, 0xdd, 0x86, + 0x9f, 0x1a, 0x97, 0x7d, 0x12, 0xac, 0xef, 0x36, 0x58, 0x6a, 0x9c, 0x4d, 0xca, 0xb3, 0x71, 0xc4, + 0x35, 0xe0, 0x2f, 0xc0, 0x70, 0x8b, 0xd9, 0xe5, 0x4b, 0x1d, 0xd6, 0x27, 0xa5, 0xea, 0x30, 0x5f, + 0x0c, 0x12, 0x73, 0xf0, 0x06, 0x98, 0x6c, 0x10, 0x87, 0xda, 0x66, 0x99, 0x18, 0xb6, 0x65, 0xba, + 0x9c, 0x44, 0xc3, 0xfa, 0x39, 0x29, 0x3c, 0xb9, 0x16, 0x9d, 0x44, 0x71, 0x59, 0xf5, 0x83, 0x14, + 0x98, 0x0e, 0x17, 0x80, 0x9a, 0x35, 0xe2, 0xc2, 0x47, 0x60, 0xce, 0xf5, 0x70, 0x85, 0xd6, 0xe8, + 0x53, 0xec, 0x51, 0xdb, 0xfa, 0x07, 0xb5, 0x4c, 0xfb, 0x71, 0x1c, 0x3d, 0xdf, 0x69, 0x17, 0xe6, + 0xca, 0x7b, 0x4a, 0xa1, 0x7d, 0x10, 0xe0, 0x5d, 0x30, 0xe1, 0x92, 0x1a, 0x31, 0x3c, 0xe1, 0xaf, + 0x8c, 0xcb, 0xaf, 0x3a, 0xed, 0xc2, 0x44, 0x39, 0x32, 0xfe, 0xa6, 0x5d, 0x38, 0x13, 0x0b, 0x8c, + 0x98, 0x44, 0x31, 0x65, 0xf8, 0x08, 0x8c, 0x35, 0xd8, 0x2f, 0x4a, 0xdc, 0x5c, 0x6a, 0x3e, 0x3d, + 0x08, 0x57, 0x92, 0x01, 0xd7, 0x67, 0x64, 0xa8, 0xc6, 0xd6, 0x24, 0x12, 0x0a, 0x30, 0xd5, 0x4f, + 0x53, 0xe0, 0xc2, 0x6d, 0xdb, 0xa1, 0x4f, 0x59, 0x55, 0xa8, 0xad, 0xd9, 0xe6, 0x92, 0x44, 0x24, + 0x0e, 0xfc, 0x37, 0x18, 0x63, 0xe7, 0x90, 0x89, 0x3d, 0xdc, 0x83, 0xa7, 0xc1, 0x71, 0xa2, 0x35, + 0x76, 0xaa, 0x6c, 0xc0, 0xd5, 0x98, 0xb4, 0xd6, 0x5a, 0xd0, 0x44, 0x21, 0x59, 0x25, 0x1e, 0x0e, + 0x73, 0x3d, 0x1c, 0x43, 0x01, 0x2a, 0x7c, 0x08, 0x32, 0x6e, 0x83, 0x18, 0x92, 0xaa, 0x37, 0xfa, + 0x7a, 0xd6, 0x7b, 0xa1, 0xe5, 0x06, 0x31, 0xc2, 0xe2, 0xc3, 0xbe, 0x10, 0x87, 0x85, 0x04, 0x8c, + 0xb8, 0x9c, 0xd2, 0x7c, 0x57, 0xb3, 0xa5, 0x3f, 0x1d, 0xd6, 0x80, 0xc8, 0x8b, 0x20, 0xe7, 0xc4, + 0x37, 0x92, 0xe0, 0xea, 0x37, 0x0a, 0x28, 0xec, 0xa1, 0xa9, 0x93, 0x6d, 0xdc, 0xa2, 0xb6, 0x03, + 0x37, 0xc0, 0x28, 0x1f, 0x79, 0xd0, 0x90, 0xa1, 0x2c, 0x0e, 0xbe, 0x8d, 0x9c, 0xb6, 0x7a, 0x96, + 0x65, 0x64, 0x59, 0x60, 0x20, 0x1f, 0x0c, 0x6e, 0x82, 0x71, 0xfe, 0xf3, 0xa6, 0xfd, 0xd8, 
0x92, + 0x61, 0x3c, 0x30, 0xf2, 0x24, 0x3b, 0x21, 0xca, 0x3e, 0x0a, 0x0a, 0x01, 0xd5, 0xb7, 0xd3, 0x60, + 0x7e, 0x0f, 0xcf, 0x96, 0x6d, 0xcb, 0xa4, 0x8c, 0xfc, 0xf0, 0x76, 0x2c, 0xff, 0x17, 0x13, 0xf9, + 0x7f, 0xb9, 0x9f, 0x7e, 0xa4, 0x1e, 0xac, 0x04, 0xfb, 0x95, 0x8a, 0x61, 0xc9, 0x80, 0xbf, 0x69, + 0x17, 0x7a, 0xf4, 0x63, 0x5a, 0x80, 0x14, 0xdf, 0x16, 0xd8, 0x02, 0xb0, 0x86, 0x5d, 0x6f, 0xdd, + 0xc1, 0x96, 0x2b, 0x2c, 0xd1, 0x3a, 0x91, 0x4c, 0xb8, 0x3a, 0x18, 0x91, 0x99, 0x86, 0x3e, 0x27, + 0x57, 0x01, 0x57, 0xba, 0xd0, 0x50, 0x0f, 0x0b, 0xf0, 0x97, 0x60, 0xc4, 0x21, 0xd8, 0xb5, 0xad, + 0x5c, 0x86, 0x7b, 0x11, 0xd0, 0x06, 0xf1, 0x51, 0x24, 0x67, 0xe1, 0xaf, 0xc1, 0x68, 0x9d, 0xb8, + 0x2e, 0xae, 0x92, 0xdc, 0x30, 0x17, 0x0c, 0xea, 0xee, 0xaa, 0x18, 0x46, 0xfe, 0xbc, 0xfa, 0xad, + 0x02, 0x2e, 0xee, 0x11, 0xc7, 0x15, 0xea, 0x7a, 0x70, 0xb3, 0x2b, 0x53, 0xb5, 0xc1, 0x1c, 0x64, + 0xda, 0x3c, 0x4f, 0x83, 0x1a, 0xe1, 0x8f, 0x44, 0xb2, 0x74, 0x13, 0x0c, 0x53, 0x8f, 0xd4, 0xfd, + 0x02, 0xf4, 0xc7, 0x43, 0x66, 0x51, 0x58, 0xdf, 0xef, 0x30, 0x34, 0x24, 0x40, 0xd5, 0xe7, 0xe9, + 0x3d, 0x7d, 0x63, 0xa9, 0x0c, 0xff, 0x0b, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, + 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a, + 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed, + 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x7b, 0x90, 0xad, 0xe3, 0x27, 0x81, 0x8a, 0x38, + 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x03, 0xc6, 0x06, 0x76, 0x12, + 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8, + 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x7c, 0xd8, 0xed, 0x93, + 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xe3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, + 0x85, 0x7f, 0x03, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x96, 0xb8, 0x6f, 0xb0, 0xa6, 0x90, + 0x6d, 0x67, 0x5a, 0x3f, 0xcf, 0x32, 0xec, 0x7e, 0xd7, 0x2c, 0xea, 0xa1, 0x01, 0x0d, 0x30, 0xc9, + 0xf2, 0x4e, 0xec, 0x1d, 0x95, 0xfd, 0xe7, 0xc1, 0x92, 0x7a, 0x96, 0xb5, 0x0e, 0x2b, 0x51, 0x10, + 0x14, 0xc7, 0x84, 0x4b, 0x60, 0x5a, 0xb6, 0x3d, 0x89, 0xbd, 0xbc, 0x20, 0x83, 0x3d, 0xbd, 0x1c, + 0x9f, 0x46, 0x49, 0x79, 0x06, 0x61, 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0x9b, + 0xf1, 0x69, 0x94, 0x94, 0x87, 0x35, 0x30, 0x25, 0x51, 0xe5, 0xd6, 0xe6, 0x86, 0x39, 0x3b, 0x06, + 0x6c, 0x50, 0xe5, 0xc9, 0x15, 0xd0, 0x7d, 0x39, 0x86, 0x85, 0x12, 0xd8, 0xd0, 0x03, 0xc0, 0xf0, + 0xab, 0xa9, 0x9b, 0x1b, 0xe1, 0x96, 0xfe, 0x72, 0x48, 0xbe, 0x04, 0x65, 0x39, 0xec, 0x01, 0x82, + 0x21, 0x17, 0x45, 0xec, 0xa8, 0xef, 0x29, 0x60, 0x26, 0xd9, 0xe0, 0x06, 0x57, 0x0b, 0x65, 0xcf, + 0xab, 0xc5, 0x43, 0x30, 0x26, 0x5a, 0x25, 0xdb, 0x91, 0x04, 0xf8, 0xdd, 0x80, 0x45, 0x0f, 0x57, + 0x48, 0xad, 0x2c, 0x55, 0x05, 0x9d, 0xfd, 0x2f, 0x14, 0x40, 0xaa, 0x1f, 0x65, 0x00, 0x08, 0x53, + 0x0c, 0x2e, 0xc6, 0x4e, 0xb9, 0xf9, 0xc4, 0x29, 0x37, 0x13, 0xbd, 0xa7, 0x44, 0x4e, 0xb4, 0x0d, + 0x30, 0x62, 0xf3, 0xd2, 0x23, 0x57, 0x58, 0xea, 0x17, 0xcc, 0xa0, 0x4d, 0x0a, 0xd0, 0x74, 0xc0, + 0xce, 0x0e, 0x59, 0xc0, 0x24, 0x1a, 0xbc, 0x07, 0x32, 0x0d, 0xdb, 0xf4, 0xfb, 0x9a, 0xbe, 0x2d, + 0xe1, 0x9a, 0x6d, 0xba, 0x31, 0xcc, 0x31, 0xb6, 0x76, 0x36, 0x8a, 0x38, 0x0e, 0x6b, 0x33, 0xfd, + 0x97, 0x0a, 0x4e, 0xd1, 0x6c, 0x69, 0xb1, 0x1f, 0x66, 0xaf, 0x47, 0x01, 0x11, 0x4c, 0x7f, 0x06, + 0x05, 0x98, 0xf0, 0x2d, 0x05, 0xcc, 0x1a, 0xc9, 0x0b, 0x76, 0x6e, 0x74, 0xb0, 0xae, 0x6c, 0xdf, + 0x77, 0x08, 0xfd, 
0x5c, 0xa7, 0x5d, 0x98, 0xed, 0x12, 0x41, 0xdd, 0xe6, 0x98, 0x93, 0x44, 0xde, + 0xc6, 0x64, 0x2d, 0xec, 0xeb, 0x64, 0xaf, 0x6b, 0xa8, 0x70, 0xd2, 0x9f, 0x41, 0x01, 0xa6, 0xfa, + 0x2c, 0x03, 0x26, 0x62, 0xd7, 0xbc, 0x9f, 0x83, 0x33, 0x22, 0xe1, 0x8f, 0x96, 0x33, 0x02, 0xf3, + 0xe8, 0x39, 0x23, 0x70, 0x4f, 0x94, 0x33, 0xc2, 0xe4, 0x49, 0x72, 0x26, 0xe2, 0x64, 0x0f, 0xce, + 0x7c, 0x9e, 0xf2, 0x39, 0x23, 0x9a, 0x8e, 0xc1, 0x38, 0x23, 0x64, 0x23, 0x9c, 0xb9, 0x1f, 0xbd, + 0x49, 0xf7, 0xe9, 0xfe, 0x34, 0x3f, 0xc2, 0xda, 0xdf, 0x9b, 0xd8, 0xf2, 0xa8, 0xb7, 0xab, 0x8f, + 0x77, 0xdd, 0xba, 0x4d, 0x30, 0x81, 0x5b, 0xc4, 0xc1, 0x55, 0xc2, 0x87, 0x25, 0x69, 0x0e, 0x8a, + 0x3b, 0xc3, 0x2e, 0xbd, 0x4b, 0x11, 0x1c, 0x14, 0x43, 0x65, 0x0d, 0x81, 0xfc, 0x7e, 0xe0, 0x05, + 0xb7, 0x69, 0x79, 0x46, 0xf2, 0x86, 0x60, 0xa9, 0x6b, 0x16, 0xf5, 0xd0, 0x50, 0xdf, 0x4d, 0x81, + 0xd9, 0xae, 0x77, 0x8c, 0x30, 0x28, 0xca, 0x31, 0x05, 0x25, 0x75, 0x82, 0x41, 0x49, 0x1f, 0x38, + 0x28, 0x5f, 0xa4, 0x00, 0xec, 0x3e, 0x4e, 0xe0, 0xff, 0x78, 0x53, 0x62, 0x38, 0xb4, 0x42, 0x4c, + 0x31, 0x7d, 0x14, 0x0d, 0x75, 0xb4, 0xa3, 0x89, 0x62, 0xa3, 0xa4, 0xb1, 0x63, 0x7a, 0xf2, 0x0d, + 0x5f, 0xd4, 0xd2, 0x47, 0xfb, 0xa2, 0xa6, 0x7e, 0x95, 0x0c, 0xe3, 0xa9, 0x7e, 0xc2, 0xeb, 0xb5, + 0xfd, 0xe9, 0x13, 0xdc, 0x7e, 0xf5, 0x33, 0x05, 0xcc, 0x24, 0xdb, 0x91, 0x53, 0xf7, 0xb0, 0xfb, + 0x65, 0xdc, 0x89, 0xd3, 0xfd, 0xa8, 0xfb, 0x4c, 0x01, 0x67, 0x4f, 0xd9, 0x3f, 0x3c, 0xea, 0x27, + 0xdd, 0x6b, 0x3e, 0x2d, 0xff, 0xd3, 0xe8, 0xd7, 0x5e, 0xbc, 0xce, 0x0f, 0xbd, 0x7c, 0x9d, 0x1f, + 0x7a, 0xf5, 0x3a, 0x3f, 0xf4, 0xff, 0x4e, 0x5e, 0x79, 0xd1, 0xc9, 0x2b, 0x2f, 0x3b, 0x79, 0xe5, + 0x55, 0x27, 0xaf, 0x7c, 0xd7, 0xc9, 0x2b, 0xef, 0x7c, 0x9f, 0x1f, 0xfa, 0xd7, 0xa8, 0x84, 0xfe, + 0x29, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x79, 0x7e, 0xc9, 0x1b, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := 
m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1408,6 +1557,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1484,6 +1645,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1928,6 +2101,36 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -2166,6 +2369,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2193,6 +2400,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2326,6 +2537,30 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) 
+ `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2507,6 +2742,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2521,6 +2757,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2626,7 +2863,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2649,15 +2886,15 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2685,13 +2922,13 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2701,27 +2938,28 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := 
m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2749,7 +2987,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2775,7 +3013,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2798,17 +3036,17 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2818,24 +3056,323 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { @@ -4623,6 +5160,42 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4852,6 +5425,42 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 24dc5882e..77a6cb379 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta2; @@ -30,6 +30,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta2"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // Container is the name of the container in the pods of the scaling target + optional string container = 3; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -256,8 +290,10 @@ message MetricIdentifier { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. 
+ // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -279,6 +315,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -290,8 +335,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -313,6 +360,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 6e5b8f68c..ac6cb6769 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -96,8 +96,10 @@ type CrossVersionObjectReference struct { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. 
+ // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -116,6 +118,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -169,7 +179,7 @@ type HPAScalingRules struct { // - For scale up: 0 (i.e. no stabilization is done). // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional @@ -220,6 +230,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -263,6 +279,22 @@ type ResourceMetricSource struct { Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. 
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -382,8 +414,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -402,6 +436,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -443,6 +484,20 @@ type ResourceMetricStatus struct { Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // Container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. 
type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 3f38880f9..e3ea3002b 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -27,6 +27,28 @@ package v2beta2 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", + "container": "Container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -162,12 +184,13 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -175,12 +198,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index ca26fe920..81642822a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -25,6 +25,40 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. 
+func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -340,6 +374,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -376,6 +415,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 75de45f17..7548c04dc 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1; @@ -151,6 +151,7 @@ message JobSpec { // JobStatus represents the current state of a Job. message JobStatus { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -166,6 +167,7 @@ message JobStatus { // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 646a3cd7e..fd478874a 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -131,6 +131,7 @@ type JobSpec struct { // JobStatus represents the current state of a Job. type JobStatus struct { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -146,6 +147,7 @@ type JobStatus struct { // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. 
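The autoscaling/v2beta2 additions above introduce a ContainerResource metric source, which lets an HPA scale on a single container's usage rather than the pod-wide aggregate. A minimal Go sketch of how a client might populate the new field follows; it is editor-added illustration, not part of the vendored files. It assumes the upstream ContainerResourceMetricSourceType and UtilizationMetricType constants from the same Kubernetes 1.20 change, and the "app" container name and 60% utilization target are invented for the example. The metric is only acted on when the HPAContainerMetrics feature gate is enabled.

package example

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
)

// containerCPUMetric targets 60% average CPU utilization of the "app"
// container in each pod of the scale target, instead of the pod total.
func containerCPUMetric() autoscalingv2beta2.MetricSpec {
	averageUtilization := int32(60)
	return autoscalingv2beta2.MetricSpec{
		Type: autoscalingv2beta2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2beta2.ContainerResourceMetricSource{
			Name:      corev1.ResourceCPU,
			Container: "app",
			Target: autoscalingv2beta2.MetricTarget{
				Type:               autoscalingv2beta2.UtilizationMetricType,
				AverageUtilization: &averageUtilization,
			},
		},
	}
}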
// +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 0120e07d4..0d8003a72 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -80,9 +80,9 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "conditions": "The latest available observations of an object's current state. When a job fails, one of the conditions will have type == \"Failed\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", "active": "The number of actively running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 995b4f3f9..4dab09a52 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1beta1; diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 0bba13b86..f538d50cd 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v2alpha1; diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 8427424a8..839c1aa87 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1; diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 2fb4dc4ec..73631fb77 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. 
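The batch/v1 comment changes above clarify two contract points: a failed Job reports a condition with type "Failed", and completionTime is only set when the Job finishes successfully. A short editor-added Go sketch of how a caller might apply that contract follows; the helper name jobOutcome is invented for the example, while the JobComplete and JobFailed condition types and corev1.ConditionTrue are existing upstream identifiers.

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobOutcome reports whether the Job has finished and, if so, whether it
// succeeded, based solely on its status conditions.
func jobOutcome(job *batchv1.Job) (finished, succeeded bool) {
	for _, cond := range job.Status.Conditions {
		if cond.Status != corev1.ConditionTrue {
			continue
		}
		switch cond.Type {
		case batchv1.JobComplete:
			// Per the clarified docs, CompletionTime is non-nil only here.
			return true, true
		case batchv1.JobFailed:
			return true, false
		}
	}
	return false, false
}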
// This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1beta1; diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 4206746d8..4d887850d 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index cfc2711c6..10b485e30 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1beta1; diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 9b29b21e5..5dcc5eb77 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -4025,10 +4025,38 @@ func (m *PodTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortStatus.Merge(m, src) +} +func (m *PortStatus) XXX_Size() int { + return m.Size() +} +func (m *PortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PortStatus proto.InternalMessageInfo + func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var 
xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func 
(*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var 
xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4896,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) 
ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5204,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5232,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - 
return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5260,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5288,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5344,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5372,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5400,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5428,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5456,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return 
fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5484,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5512,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5540,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5568,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5596,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5624,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return 
fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6006,6 +6034,7 @@ func init() { proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortStatus)(nil), "k8s.io.api.core.v1.PortStatus") proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") @@ -6087,876 +6116,882 @@ func 
init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 13889 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0xd7, - 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x63, 0xe6, 0xe0, 0x7d, 0xb1, 0xbb, 0xc4, 0x82, 0xbb, 0x8b, 0x65, - 0xaf, 0xb4, 0x5c, 0x8a, 0x24, 0x56, 0x7c, 0x89, 0x34, 0x49, 0xd1, 0x02, 0x30, 0xc0, 0xee, 0x70, - 0x17, 0xd8, 0xe1, 0x1d, 0xec, 0xae, 0x44, 0x53, 0xfa, 0xd4, 0x98, 0xb9, 0x00, 0x9a, 0x98, 0xe9, - 0x1e, 0x76, 0xf7, 0x60, 0x17, 0xfc, 0xe4, 0xfa, 0xfc, 0xc9, 0x4f, 0xf9, 0x91, 0x52, 0xa5, 0x5c, - 0x79, 0xd8, 0x2e, 0x57, 0xca, 0x71, 0xca, 0x56, 0x9c, 0xa4, 0xe2, 0xd8, 0xb1, 0x1d, 0xcb, 0x89, - 0x9d, 0x38, 0x0f, 0x27, 0x3f, 0x1c, 0xc7, 0x95, 0x44, 0xae, 0x72, 0x05, 0xb1, 0xd7, 0xa9, 0xb8, - 0xf4, 0x23, 0xb6, 0x13, 0x3b, 0x3f, 0x82, 0xb8, 0xe2, 0xd4, 0x7d, 0xf6, 0xbd, 0x3d, 0xdd, 0x33, - 0x83, 0x25, 0x00, 0x51, 0x2a, 0xfe, 0x9b, 0xb9, 0xe7, 0xdc, 0x73, 0x6f, 0xdf, 0xe7, 0xb9, 0xe7, - 0x09, 0xaf, 0xec, 0xbc, 0x14, 0xce, 0xbb, 0xfe, 0xd5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, - 0xf0, 0xea, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x55, 0x00, 0x9c, 0x96, 0x7b, 0xb5, 0xe6, 0x07, 0xe4, - 0xea, 0xee, 0x33, 0x57, 0xb7, 0x88, 0x47, 0x02, 0x27, 0x22, 0xf5, 0xf9, 0x56, 0xe0, 0x47, 0x3e, - 0x42, 0x1c, 0x67, 0xde, 0x69, 0xb9, 0xf3, 0x14, 0x67, 0x7e, 0xf7, 0x99, 0xd9, 0xa7, 0xb7, 0xdc, - 0x68, 0xbb, 0xbd, 0x31, 0x5f, 0xf3, 0x9b, 0x57, 0xb7, 0xfc, 0x2d, 0xff, 0x2a, 0x43, 0xdd, 0x68, - 0x6f, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0x3e, 0x1f, 0x37, 0xd3, 0x74, 0x6a, 0xdb, - 0xae, 0x47, 0x82, 0xbd, 0xab, 0xad, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x1d, 0xd4, 0x48, - 0xb2, 0xe1, 0xae, 0xb5, 0xc2, 0xab, 0x4d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0xbd, 0x9a, 0x55, 0x2b, - 0x68, 0x7b, 0x91, 0xdb, 0xec, 0x6c, 0xe6, 0xe3, 0xbd, 0x2a, 0x84, 0xb5, 0x6d, 0xd2, 0x74, 0x3a, - 0xea, 0x3d, 0x97, 0x55, 0xaf, 0x1d, 0xb9, 0x8d, 0xab, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, - 0x7f, 0xd5, 0x82, 0x8b, 0x0b, 0x77, 0xab, 0xcb, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, - 0xed, 0x54, 0x23, 0x3f, 0x20, 0x77, 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0x53, 0x50, - 0xd8, 0x65, 0xff, 0xcb, 0xa5, 0x19, 0xeb, 0xa2, 0x75, 0xa5, 0xb8, 0x38, 0xf9, 0x1b, 0xfb, 0x73, - 0x1f, 0x7a, 0xb0, 0x3f, 0x57, 0xb8, 0x23, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x68, 0x33, 0x5c, - 0xdf, 0x6b, 0x91, 0x99, 0x1c, 0xc3, 0x1d, 0x17, 0xb8, 0x43, 0x2b, 0x55, 0x5a, 0x8a, 0x05, 0x14, - 0x5d, 0x85, 0x62, 0xcb, 0x09, 0x22, 0x37, 0x72, 0x7d, 0x6f, 0x26, 0x7f, 0xd1, 0xba, 0x32, 0xb8, - 0x38, 0x25, 0x50, 0x8b, 0x15, 0x09, 0xc0, 0x31, 0x0e, 0xed, 0x46, 0x40, 0x9c, 0xfa, 0x2d, 0xaf, - 0xb1, 0x37, 0x33, 0x70, 0xd1, 0xba, 0x52, 0x88, 0xbb, 0x81, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8f, - 0xe4, 0xa0, 0xb0, 0xb0, 0xb9, 0xe9, 0x7a, 0x6e, 0xb4, 0x87, 0xee, 0xc0, 0xa8, 0xe7, 0xd7, 0x89, - 0xfc, 0xcf, 0xbe, 0x62, 0xe4, 0xd9, 0x8b, 0xf3, 0x9d, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, 0x5b, 0x9c, - 0x7c, 0xb0, 0x3f, 0x37, 0xaa, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd2, 0xf2, 0xeb, 0x8a, 0x6c, - 0x8e, 0x91, 0x9d, 0x4b, 0x23, 0x5b, 0x89, 0xd1, 0x16, 0x27, 0x1e, 0xec, 0xcf, 0x8d, 0x68, 0x05, - 0x58, 0x27, 0x82, 0x36, 0x60, 0x82, 0xfe, 0xf5, 0x22, 0x57, 0xd1, 0xcd, 0x33, 0xba, 0x97, 0xb2, - 0xe8, 0x6a, 0xa8, 0x8b, 0xd3, 0x0f, 0xf6, 0xe7, 0x26, 0x12, 0x85, 0x38, 0x49, 0xd0, 0x7e, 0x17, - 0xc6, 0x17, 0xa2, 0xc8, 0xa9, 0x6d, 0x93, 0x3a, 0x9f, 0x41, 0xf4, 0x3c, 0x0c, 0x78, 0x4e, 0x93, - 0x88, 0xf9, 0xbd, 0x28, 0x06, 0x76, 0x60, 0xcd, 0x69, 0x92, 0x83, 0xfd, 0xb9, 0xc9, 0xdb, 0x9e, - 0xfb, 0x4e, 
0x5b, 0xac, 0x0a, 0x5a, 0x86, 0x19, 0x36, 0x7a, 0x16, 0xa0, 0x4e, 0x76, 0xdd, 0x1a, - 0xa9, 0x38, 0xd1, 0xb6, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x49, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x87, - 0xe2, 0xc2, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x3d, 0x44, 0x3b, 0x30, 0xd1, 0x0a, 0xc8, 0x26, 0x09, - 0x54, 0xd1, 0x8c, 0x75, 0x31, 0x7f, 0x65, 0xe4, 0xd9, 0x2b, 0xa9, 0x1f, 0x6b, 0xa2, 0x2e, 0x7b, - 0x51, 0xb0, 0xb7, 0xf8, 0x88, 0x68, 0x6f, 0x22, 0x01, 0xc5, 0x49, 0xca, 0xf6, 0x3f, 0xcf, 0xc1, - 0xe9, 0x85, 0x77, 0xdb, 0x01, 0x29, 0xb9, 0xe1, 0x4e, 0x72, 0x85, 0xd7, 0xdd, 0x70, 0x67, 0x2d, - 0x1e, 0x01, 0xb5, 0xb4, 0x4a, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x34, 0x0c, 0xd3, 0xdf, 0xb7, 0x71, - 0x59, 0x7c, 0xf2, 0xb4, 0x40, 0x1e, 0x29, 0x39, 0x91, 0x53, 0xe2, 0x20, 0x2c, 0x71, 0xd0, 0x2a, - 0x8c, 0xd4, 0xd8, 0x86, 0xdc, 0x5a, 0xf5, 0xeb, 0x84, 0x4d, 0x66, 0x71, 0xf1, 0x49, 0x8a, 0xbe, - 0x14, 0x17, 0x1f, 0xec, 0xcf, 0xcd, 0xf0, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, 0xad, - 0xf6, 0xd7, 0x00, 0xa3, 0x04, 0x29, 0x7b, 0xeb, 0x8a, 0xb6, 0x55, 0x06, 0xd9, 0x56, 0x19, 0x4d, - 0xdf, 0x26, 0xe8, 0x19, 0x18, 0xd8, 0x71, 0xbd, 0xfa, 0xcc, 0x10, 0xa3, 0x75, 0x9e, 0xce, 0xf9, - 0x0d, 0xd7, 0xab, 0x1f, 0xec, 0xcf, 0x4d, 0x19, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0x7f, 0x6a, - 0xc1, 0x1c, 0x83, 0xad, 0xb8, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, - 0x9f, 0x05, 0x08, 0x49, 0x2d, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x15, 0x04, 0x6b, 0x58, - 0xf4, 0x40, 0x08, 0xb7, 0x9d, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x40, 0xa8, 0x4a, 0x00, 0x8e, - 0x71, 0x8c, 0x03, 0x21, 0xdf, 0xeb, 0x40, 0x40, 0x9f, 0x80, 0x89, 0xb8, 0xb1, 0xb0, 0xe5, 0xd4, - 0xe4, 0x00, 0xb2, 0x2d, 0x53, 0x35, 0x41, 0x38, 0x89, 0x6b, 0xff, 0x6d, 0x4b, 0x2c, 0x1e, 0xfa, - 0xd5, 0xef, 0xf3, 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xe1, 0x45, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, - 0x1c, 0x14, 0xe8, 0xdd, 0x54, 0x77, 0x22, 0x47, 0x9c, 0x7b, 0x1f, 0xd3, 0xf6, 0x96, 0xba, 0x2a, - 0xe6, 0x5b, 0x3b, 0x5b, 0xb4, 0x20, 0x9c, 0xa7, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0x78, 0x9b, 0xd4, - 0xa2, 0x55, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x60, 0x28, 0x72, 0x82, - 0x2d, 0x12, 0x89, 0x03, 0x30, 0xf5, 0xa0, 0xe2, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0xab, 0x91, 0xf8, - 0x5a, 0x58, 0x67, 0x55, 0xb1, 0x20, 0x61, 0xff, 0xd0, 0x30, 0x9c, 0x5d, 0xaa, 0x96, 0x33, 0xd6, - 0xd5, 0x65, 0x18, 0xaa, 0x07, 0xee, 0x2e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x25, 0x56, 0x8a, 0x05, - 0x14, 0xbd, 0x04, 0xa3, 0xfc, 0x42, 0xba, 0xee, 0x78, 0xf5, 0x86, 0x1c, 0xe2, 0x53, 0x02, 0x7b, - 0xf4, 0x8e, 0x06, 0xc3, 0x06, 0xe6, 0x21, 0x17, 0xd5, 0xe5, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, - 0x68, 0xc1, 0x24, 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x1d, 0x91, 0x70, 0x66, 0x90, 0x9d, - 0x74, 0x4b, 0x69, 0xa3, 0x95, 0x39, 0x02, 0xf3, 0x77, 0x12, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, - 0x9d, 0x4c, 0x82, 0x71, 0x47, 0xb3, 0xe8, 0x3b, 0x2d, 0x98, 0xad, 0xf9, 0x5e, 0x14, 0xf8, 0x8d, - 0x06, 0x09, 0x2a, 0xed, 0x8d, 0x86, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, - 0xe6, 0x50, 0x21, 0x89, 0x39, 0xbc, 0xf0, 0x60, 0x7f, 0x6e, 0x76, 0x29, 0x93, 0x14, 0xee, 0xd2, - 0x0c, 0xda, 0x01, 0x44, 0xaf, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf1, 0xe1, 0xfe, 0x1b, 0x3f, - 0xf3, 0x60, 0x7f, 0x0e, 0xad, 0x75, 0x90, 0xc0, 0x29, 0x64, 0xd1, 0x3b, 0x70, 0x8a, 0x96, 0x76, - 0x7c, 0x6b, 0xa1, 0xff, 0xe6, 0x66, 0x1e, 0xec, 0xcf, 0x9d, 0x5a, 0x4b, 0x21, 0x82, 0x53, 0x49, - 0xa3, 0xef, 0xb0, 0xe0, 0x6c, 0xfc, 0xf9, 0xcb, 0xf7, 0x5b, 0x8e, 0x57, 0x8f, 0x1b, 0x2e, 0xf6, - 0xdf, 0x30, 0x3d, 0x93, 0xcf, 0x2e, 
[elided hunk: several hundred removed lines of hex bytes from a machine-generated protobuf file descriptor in the vendored Kubernetes code, updated as part of the k8s 1.20 dependency bump; the data is not human-reviewable and its diff line structure was lost]
0x72, 0x83, 0x6e, 0x19, 0x88, 0x97, - 0x05, 0x4e, 0x27, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x7a, 0x42, 0xcc, 0xfc, 0xd7, 0x31, 0x21, 0xe6, - 0xc0, 0x09, 0x26, 0xc4, 0x5c, 0x83, 0xe1, 0x2d, 0x37, 0xc2, 0xa4, 0xe5, 0x0b, 0xa6, 0x3f, 0x75, - 0x1d, 0x5e, 0xe3, 0x28, 0x9d, 0xa9, 0xd7, 0x04, 0x00, 0x4b, 0x22, 0xe8, 0x75, 0xb5, 0x03, 0x87, - 0xb2, 0xdf, 0xcc, 0x9d, 0x5a, 0xf1, 0xd4, 0x3d, 0x28, 0xd2, 0x5e, 0x0e, 0x3f, 0x6c, 0xda, 0xcb, - 0x15, 0x99, 0xac, 0xb2, 0x90, 0xed, 0xba, 0xc2, 0x72, 0x51, 0xf6, 0x48, 0x51, 0x79, 0x47, 0x4f, - 0xf0, 0x59, 0xcc, 0x3e, 0x09, 0x54, 0xee, 0xce, 0x3e, 0xd3, 0x7a, 0x7e, 0x8f, 0x05, 0xa7, 0x5b, - 0x69, 0xb9, 0x6e, 0x85, 0x02, 0xf9, 0x85, 0xbe, 0xd3, 0xe9, 0x1a, 0x0d, 0x32, 0x41, 0x4d, 0x2a, - 0x1a, 0x4e, 0x6f, 0x8e, 0x0e, 0x74, 0xb0, 0x51, 0x17, 0x8a, 0xcc, 0x4b, 0x19, 0xf9, 0x41, 0xbb, - 0x64, 0x05, 0x5d, 0x4f, 0xc9, 0x45, 0xf9, 0xe1, 0xac, 0x5c, 0x94, 0x7d, 0x67, 0xa0, 0x7c, 0x5d, - 0x65, 0x06, 0x1d, 0xcb, 0x5e, 0x4a, 0x3c, 0xef, 0x67, 0xcf, 0x7c, 0xa0, 0xaf, 0xab, 0x7c, 0xa0, - 0x5d, 0x22, 0x7b, 0xf2, 0x6c, 0x9f, 0x3d, 0xb3, 0x80, 0x6a, 0x99, 0x3c, 0x27, 0x8e, 0x26, 0x93, - 0xa7, 0x71, 0xd5, 0xf0, 0x64, 0x92, 0x4f, 0xf6, 0xb8, 0x6a, 0x0c, 0xba, 0xdd, 0x2f, 0x1b, 0x9e, - 0xb5, 0x74, 0xea, 0xa1, 0xb2, 0x96, 0xde, 0xd1, 0xb3, 0x80, 0xa2, 0x1e, 0x69, 0x2e, 0x29, 0x52, - 0x9f, 0xb9, 0x3f, 0xef, 0xe8, 0x17, 0xe0, 0x74, 0x36, 0x5d, 0x75, 0xcf, 0x75, 0xd2, 0x4d, 0xbd, - 0x02, 0x3b, 0x72, 0x8a, 0x9e, 0x3a, 0x99, 0x9c, 0xa2, 0xa7, 0x8f, 0x3c, 0xa7, 0xe8, 0x99, 0x13, - 0xc8, 0x29, 0xfa, 0xc8, 0x09, 0xe6, 0x14, 0xbd, 0xc3, 0xac, 0x2e, 0x78, 0x6c, 0x17, 0x11, 0x89, - 0x34, 0x3d, 0xea, 0x65, 0x5a, 0x00, 0x18, 0xfe, 0x71, 0x0a, 0x84, 0x63, 0x52, 0x29, 0xb9, 0x4a, - 0x67, 0x8e, 0x21, 0x57, 0xe9, 0x5a, 0x9c, 0xab, 0xf4, 0x6c, 0xf6, 0x54, 0xa7, 0xd8, 0xe9, 0x67, - 0x64, 0x28, 0xbd, 0xa3, 0x67, 0x16, 0x7d, 0xb4, 0x8b, 0x28, 0x3e, 0x4d, 0xf0, 0xd8, 0x25, 0x9f, - 0xe8, 0x6b, 0x3c, 0x9f, 0xe8, 0xb9, 0xec, 0x93, 0x3c, 0x79, 0xdd, 0x19, 0x59, 0x44, 0x69, 0xbf, - 0x54, 0xcc, 0x3b, 0x16, 0x73, 0x35, 0xa3, 0x5f, 0x2a, 0x68, 0x5e, 0x67, 0xbf, 0x14, 0x08, 0xc7, - 0xa4, 0xec, 0xef, 0xcb, 0xc1, 0x85, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x62, 0x4d, 0x63, 0x42, - 0x9a, 0xca, 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0xe1, 0xc4, 0xae, 0xc1, 0x94, 0x32, 0xf0, 0x6f, 0xb8, - 0xb5, 0xbd, 0xb5, 0xf8, 0xe5, 0xab, 0x9c, 0xa2, 0xab, 0x49, 0x04, 0xdc, 0x59, 0x07, 0x2d, 0xc0, - 0x84, 0x51, 0x58, 0x2e, 0x89, 0xb7, 0x99, 0x12, 0xdf, 0x56, 0x4d, 0x30, 0x4e, 0xe2, 0xdb, 0x5f, - 0xb6, 0xe0, 0x91, 0x8c, 0x34, 0x60, 0x7d, 0x47, 0xcb, 0xda, 0x84, 0x89, 0x96, 0x59, 0xb5, 0x47, - 0x50, 0x3d, 0x23, 0xd9, 0x98, 0xea, 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x3f, 0xb3, 0xe0, 0x7c, - 0x57, 0x8b, 0x35, 0x84, 0xe1, 0xcc, 0x56, 0x33, 0x74, 0x96, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, - 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x79, 0x38, 0x33, 0xfd, 0xba, 0xb6, 0x5a, 0x5d, 0xe8, 0xc4, 0xc0, - 0x19, 0x35, 0xd1, 0x0a, 0xa0, 0x4e, 0x88, 0x98, 0x61, 0x16, 0xbd, 0xb7, 0x93, 0x1e, 0x4e, 0xa9, - 0x81, 0x5e, 0x84, 0x31, 0x65, 0x09, 0xa7, 0xcd, 0x38, 0x3b, 0xd8, 0xb1, 0x0e, 0xc0, 0x26, 0xde, - 0xe2, 0x95, 0xdf, 0xf8, 0xfd, 0x0b, 0x1f, 0xfa, 0xad, 0xdf, 0xbf, 0xf0, 0xa1, 0xdf, 0xf9, 0xfd, - 0x0b, 0x1f, 0xfa, 0x8e, 0x07, 0x17, 0xac, 0xdf, 0x78, 0x70, 0xc1, 0xfa, 0xad, 0x07, 0x17, 0xac, - 0xdf, 0x79, 0x70, 0xc1, 0xfa, 0xbd, 0x07, 0x17, 0xac, 0x2f, 0xfd, 0xc1, 0x85, 0x0f, 0xbd, 0x99, - 0xdb, 0x7d, 0xe6, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x9f, 0xac, 0x23, 0x24, 0x01, 0x01, - 0x00, + // 13999 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x70, 0x5c, 0xd7, + 0x79, 0x98, 0xef, 0x2e, 0x5e, 0xfb, 0xe1, 0x7d, 0x40, 0x52, 0x20, 0x24, 0x12, 0xd4, 0x95, 0x4d, + 0x51, 0x96, 0x04, 0x9a, 0x7a, 0xd8, 0x8a, 0x64, 0x2b, 0x06, 0xb0, 0x00, 0xb9, 0x22, 0x01, 0xae, + 0xce, 0x82, 0xa4, 0xed, 0xc8, 0x1e, 0x5f, 0xec, 0x1e, 0x00, 0x57, 0xd8, 0xbd, 0x77, 0x75, 0xef, + 0x5d, 0x90, 0x50, 0x9d, 0x69, 0xea, 0x3c, 0x9d, 0x47, 0xc7, 0xd3, 0xc9, 0xf4, 0x91, 0x64, 0x32, + 0x9d, 0x34, 0x9d, 0xc4, 0x75, 0xdb, 0x69, 0x9a, 0x34, 0x49, 0xe3, 0xb4, 0x49, 0x9b, 0x3e, 0xd2, + 0xfe, 0x48, 0xd3, 0x4c, 0x1b, 0x67, 0x26, 0x53, 0x34, 0x61, 0x3a, 0xcd, 0xf8, 0x47, 0x93, 0xb4, + 0x49, 0x7f, 0x14, 0xcd, 0x34, 0x9d, 0xf3, 0xbc, 0xe7, 0xdc, 0xc7, 0xee, 0x82, 0x02, 0x61, 0xd9, + 0xa3, 0x7f, 0xbb, 0xe7, 0xfb, 0xce, 0x77, 0xce, 0x3d, 0xcf, 0xef, 0x7c, 0x4f, 0x78, 0x65, 0xf7, + 0xa5, 0x70, 0xc1, 0xf5, 0x2f, 0xef, 0x76, 0x36, 0x49, 0xe0, 0x91, 0x88, 0x84, 0x97, 0xf7, 0x88, + 0xd7, 0xf0, 0x83, 0xcb, 0x02, 0xe0, 0xb4, 0xdd, 0xcb, 0x75, 0x3f, 0x20, 0x97, 0xf7, 0xae, 0x5c, + 0xde, 0x26, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0x16, 0xda, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x05, + 0xa7, 0xed, 0x2e, 0x50, 0x9c, 0x85, 0xbd, 0x2b, 0x73, 0xcf, 0x6e, 0xbb, 0xd1, 0x4e, 0x67, 0x73, + 0xa1, 0xee, 0xb7, 0x2e, 0x6f, 0xfb, 0xdb, 0xfe, 0x65, 0x86, 0xba, 0xd9, 0xd9, 0x62, 0xff, 0xd8, + 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7b, 0x21, 0x6e, 0xa6, 0xe5, 0xd4, 0x77, 0x5c, 0x8f, 0x04, 0xfb, + 0x97, 0xdb, 0xbb, 0xdb, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0xa8, 0x93, 0x64, 0xc3, 0x5d, 0x6b, + 0x85, 0x97, 0x5b, 0x24, 0x72, 0x32, 0xba, 0x3b, 0x77, 0x39, 0xaf, 0x56, 0xd0, 0xf1, 0x22, 0xb7, + 0x95, 0x6e, 0xe6, 0xc3, 0xbd, 0x2a, 0x84, 0xf5, 0x1d, 0xd2, 0x72, 0x52, 0xf5, 0x9e, 0xcf, 0xab, + 0xd7, 0x89, 0xdc, 0xe6, 0x65, 0xd7, 0x8b, 0xc2, 0x28, 0x48, 0x56, 0xb2, 0xbf, 0x6a, 0xc1, 0x85, + 0xc5, 0x3b, 0xb5, 0x95, 0xa6, 0x13, 0x46, 0x6e, 0x7d, 0xa9, 0xe9, 0xd7, 0x77, 0x6b, 0x91, 0x1f, + 0x90, 0xdb, 0x7e, 0xb3, 0xd3, 0x22, 0x35, 0x36, 0x10, 0xe8, 0x19, 0x18, 0xd9, 0x63, 0xff, 0x2b, + 0xe5, 0x59, 0xeb, 0x82, 0x75, 0xa9, 0xb4, 0x34, 0xf5, 0xeb, 0x07, 0xf3, 0xef, 0xbb, 0x7f, 0x30, + 0x3f, 0x72, 0x5b, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xc2, 0xd0, 0x56, 0xb8, 0xb1, 0xdf, 0x26, 0xb3, + 0x05, 0x86, 0x3b, 0x21, 0x70, 0x87, 0x56, 0x6b, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x0c, 0xa5, 0xb6, + 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x6c, 0xf1, 0x82, 0x75, 0x69, 0x70, 0x69, 0x5a, 0xa0, 0x96, + 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xda, 0x8d, 0x80, 0x38, 0x8d, 0x9b, 0x5e, 0x73, 0x7f, 0x76, 0xe0, + 0x82, 0x75, 0x69, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x52, 0x80, 0x91, 0xc5, + 0xad, 0x2d, 0xd7, 0x73, 0xa3, 0x7d, 0x74, 0x1b, 0xc6, 0x3c, 0xbf, 0x41, 0xe4, 0x7f, 0xf6, 0x15, + 0xa3, 0xcf, 0x5d, 0x58, 0x48, 0x2f, 0xa5, 0x85, 0x75, 0x0d, 0x6f, 0x69, 0xea, 0xfe, 0xc1, 0xfc, + 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x28, 0xb2, 0x05, 0x46, 0x76, 0x3e, + 0x8b, 0x6c, 0x35, 0x46, 0x5b, 0x9a, 0xbc, 0x7f, 0x30, 0x3f, 0xaa, 0x15, 0x60, 0x9d, 0x08, 0xda, + 0x84, 0x49, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x91, 0x47, 0x57, 0x43, 0x5d, + 0x9a, 0xb9, 0x7f, 0x30, 0x3f, 0x99, 0x28, 0xc4, 0x49, 0x82, 0xf6, 0xdb, 0x30, 0xb1, 0x18, 0x45, + 0x4e, 0x7d, 0x87, 0x34, 0xf8, 0x0c, 0xa2, 0x17, 0x60, 0xc0, 0x73, 0x5a, 0x44, 0xcc, 0xef, 0x05, + 0x31, 0xb0, 0x03, 0xeb, 0x4e, 0x8b, 0x1c, 0x1e, 0xcc, 0x4f, 0xdd, 0xf2, 0xdc, 0xb7, 0x3a, 0x62, + 0x55, 0xd0, 0x32, 0xcc, 0xb0, 0xd1, 0x73, 0x00, 0x0d, 0xb2, 0xe7, 0xd6, 0x49, 0xd5, 0x89, 0x76, + 0xc4, 0x7c, 0x23, 0x51, 0x17, 0xca, 0x0a, 0x82, 
0x35, 0x2c, 0xfb, 0x1e, 0x94, 0x16, 0xf7, 0x7c, + 0xb7, 0x51, 0xf5, 0x1b, 0x21, 0xda, 0x85, 0xc9, 0x76, 0x40, 0xb6, 0x48, 0xa0, 0x8a, 0x66, 0xad, + 0x0b, 0xc5, 0x4b, 0xa3, 0xcf, 0x5d, 0xca, 0xfc, 0x58, 0x13, 0x75, 0xc5, 0x8b, 0x82, 0xfd, 0xa5, + 0x47, 0x44, 0x7b, 0x93, 0x09, 0x28, 0x4e, 0x52, 0xb6, 0xff, 0x55, 0x01, 0x4e, 0x2f, 0xbe, 0xdd, + 0x09, 0x48, 0xd9, 0x0d, 0x77, 0x93, 0x2b, 0xbc, 0xe1, 0x86, 0xbb, 0xeb, 0xf1, 0x08, 0xa8, 0xa5, + 0x55, 0x16, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x98, 0xfe, 0xbe, 0x85, 0x2b, 0xe2, 0x93, 0x67, + 0x04, 0xf2, 0x68, 0xd9, 0x89, 0x9c, 0x32, 0x07, 0x61, 0x89, 0x83, 0xd6, 0x60, 0xb4, 0xce, 0x36, + 0xe4, 0xf6, 0x9a, 0xdf, 0x20, 0x6c, 0x32, 0x4b, 0x4b, 0x4f, 0x53, 0xf4, 0xe5, 0xb8, 0xf8, 0xf0, + 0x60, 0x7e, 0x96, 0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xbf, 0x06, 0x18, + 0x25, 0xc8, 0xd8, 0x5b, 0x97, 0xb4, 0xad, 0x32, 0xc8, 0xb6, 0xca, 0x58, 0xf6, 0x36, 0x41, 0x57, + 0x60, 0x60, 0xd7, 0xf5, 0x1a, 0xb3, 0x43, 0x8c, 0xd6, 0x39, 0x3a, 0xe7, 0xd7, 0x5d, 0xaf, 0x71, + 0x78, 0x30, 0x3f, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0xa9, 0x05, 0xf3, 0x0c, 0xb6, + 0xea, 0x36, 0x49, 0x95, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x0e, 0x20, 0x24, + 0xf5, 0x80, 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x53, 0x10, 0xac, 0x61, 0xd1, 0x03, 0x21, 0xdc, + 0x71, 0x02, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x26, 0x01, 0x38, 0xc6, 0x31, 0x0e, 0x84, + 0x62, 0xaf, 0x03, 0x01, 0x7d, 0x0c, 0x26, 0xe3, 0xc6, 0xc2, 0xb6, 0x53, 0x97, 0x03, 0xc8, 0xb6, + 0x4c, 0xcd, 0x04, 0xe1, 0x24, 0xae, 0xfd, 0xf7, 0x2c, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, + 0xd5, 0xfe, 0x45, 0x0b, 0x86, 0x97, 0x5c, 0xaf, 0xe1, 0x7a, 0xdb, 0xe8, 0xb3, 0x30, 0x42, 0xef, + 0xa6, 0x86, 0x13, 0x39, 0xe2, 0xdc, 0xfb, 0x90, 0xb6, 0xb7, 0xd4, 0x55, 0xb1, 0xd0, 0xde, 0xdd, + 0xa6, 0x05, 0xe1, 0x02, 0xc5, 0xa6, 0xbb, 0xed, 0xe6, 0xe6, 0x9b, 0xa4, 0x1e, 0xad, 0x91, 0xc8, + 0x89, 0x3f, 0x27, 0x2e, 0xc3, 0x8a, 0x2a, 0xba, 0x0e, 0x43, 0x91, 0x13, 0x6c, 0x93, 0x48, 0x1c, + 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x5e, 0x9d, 0xc4, 0xd7, 0xc2, 0x06, 0xab, + 0x8a, 0x05, 0x09, 0xfb, 0x87, 0x86, 0xe1, 0xec, 0x72, 0xad, 0x92, 0xb3, 0xae, 0x2e, 0xc2, 0x50, + 0x23, 0x70, 0xf7, 0x48, 0x20, 0xc6, 0x59, 0x51, 0x29, 0xb3, 0x52, 0x2c, 0xa0, 0xe8, 0x25, 0x18, + 0xe3, 0x17, 0xd2, 0x35, 0xc7, 0x6b, 0x34, 0xe5, 0x10, 0x9f, 0x12, 0xd8, 0x63, 0xb7, 0x35, 0x18, + 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x26, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x05, 0x0b, 0xa6, 0x78, + 0x33, 0x8b, 0x51, 0x14, 0xb8, 0x9b, 0x9d, 0x88, 0x84, 0xb3, 0x83, 0xec, 0xa4, 0x5b, 0xce, 0x1a, + 0xad, 0xdc, 0x11, 0x58, 0xb8, 0x9d, 0xa0, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, 0xee, 0x54, 0x12, 0x8c, + 0x53, 0xcd, 0xa2, 0xef, 0xb4, 0x60, 0xae, 0xee, 0x7b, 0x51, 0xe0, 0x37, 0x9b, 0x24, 0xa8, 0x76, + 0x36, 0x9b, 0x6e, 0xb8, 0xc3, 0xd7, 0x29, 0x26, 0x5b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, + 0xe6, 0xf0, 0xfc, 0xfd, 0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x4b, 0x33, 0x68, 0x17, 0x10, + 0xbd, 0x4a, 0x6b, 0x91, 0xb3, 0x4d, 0xe2, 0xc6, 0x87, 0xfb, 0x6f, 0xfc, 0xcc, 0xfd, 0x83, 0x79, + 0xb4, 0x9e, 0x22, 0x81, 0x33, 0xc8, 0xa2, 0xb7, 0xe0, 0x14, 0x2d, 0x4d, 0x7d, 0xeb, 0x48, 0xff, + 0xcd, 0xcd, 0xde, 0x3f, 0x98, 0x3f, 0xb5, 0x9e, 0x41, 0x04, 0x67, 0x92, 0x46, 0xdf, 0x61, 0xc1, + 0xd9, 0xf8, 0xf3, 0x57, 0xee, 0xb5, 0x1d, 0xaf, 0x11, 0x37, 0x5c, 0xea, 0xbf, 0x61, 0x7a, 0x26, + 0x9f, 0x5d, 0xce, 0xa3, 0x84, 0xf3, 0x1b, 0x99, 0x5b, 0x86, 0xd3, 0x99, 0xab, 0x05, 0x4d, 0x41, + 0x71, 0x97, 0x70, 0x2e, 0xa8, 0x84, 0xe9, 0x4f, 0x74, 0x0a, 0x06, 0xf7, 
0x9c, 0x66, 0x47, 0x6c, + 0x14, 0xcc, 0xff, 0xbc, 0x5c, 0x78, 0xc9, 0xb2, 0xff, 0x75, 0x11, 0x26, 0x97, 0x6b, 0x95, 0x07, + 0xda, 0x85, 0xfa, 0x35, 0x54, 0xe8, 0x7a, 0x0d, 0xc5, 0x97, 0x5a, 0x31, 0xf7, 0x52, 0xfb, 0xcb, + 0x19, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6, 0x72, 0x56, + 0xd1, 0x20, 0x9b, 0xcc, 0x4c, 0x8e, 0xe5, 0x86, 0x5f, 0x77, 0x9a, 0xc9, 0xa3, 0xef, 0x88, 0x4b, + 0xe9, 0x78, 0xe6, 0xb1, 0x0e, 0x63, 0xcb, 0x4e, 0xdb, 0xd9, 0x74, 0x9b, 0x6e, 0xe4, 0x92, 0x10, + 0x3d, 0x09, 0x45, 0xa7, 0xd1, 0x60, 0xdc, 0x56, 0x69, 0xe9, 0xf4, 0xfd, 0x83, 0xf9, 0xe2, 0x62, + 0x83, 0x5e, 0xfb, 0xa0, 0xb0, 0xf6, 0x31, 0xc5, 0x40, 0x1f, 0x84, 0x81, 0x46, 0xe0, 0xb7, 0x67, + 0x0b, 0x0c, 0x93, 0xee, 0xba, 0x81, 0x72, 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xd5, 0x02, + 0x3c, 0xb6, 0x4c, 0xda, 0x3b, 0xab, 0xb5, 0x9c, 0xf3, 0xfb, 0x12, 0x8c, 0xb4, 0x7c, 0xcf, 0x8d, + 0xfc, 0x20, 0x14, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x0b, 0x30, 0xd0, 0x8e, + 0x99, 0xca, 0x31, 0xc9, 0x90, 0x32, 0x76, 0x92, 0x41, 0x28, 0x46, 0x27, 0x24, 0x81, 0x58, 0x31, + 0x0a, 0xe3, 0x56, 0x48, 0x02, 0xcc, 0x20, 0xf1, 0xcd, 0x4c, 0xef, 0x6c, 0x71, 0x42, 0x27, 0x6e, + 0x66, 0x0a, 0xc1, 0x1a, 0x16, 0xaa, 0x42, 0x29, 0x4c, 0xcc, 0x6c, 0x5f, 0xdb, 0x74, 0x9c, 0x5d, + 0xdd, 0x6a, 0x26, 0x63, 0x22, 0xc6, 0x8d, 0x32, 0xd4, 0xf3, 0xea, 0xfe, 0x4a, 0x01, 0x10, 0x1f, + 0xc2, 0x6f, 0xb0, 0x81, 0xbb, 0x95, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4, 0x71, 0x8d, 0xde, 0x9f, 0x59, + 0xf0, 0xd8, 0xb2, 0xeb, 0x35, 0x48, 0x90, 0xb3, 0x00, 0x1f, 0xce, 0x5b, 0xf6, 0x68, 0x4c, 0x83, + 0xb1, 0xc4, 0x06, 0x8e, 0x61, 0x89, 0xd9, 0x7f, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xae, 0xfb, 0xd8, + 0x5b, 0xe9, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x80, 0x89, 0xe5, 0xa6, 0x4b, 0xbc, 0xa8, 0x52, + 0x5d, 0xf6, 0xbd, 0x2d, 0x77, 0x1b, 0xbd, 0x0c, 0x13, 0x91, 0xdb, 0x22, 0x7e, 0x27, 0xaa, 0x91, + 0xba, 0xef, 0xb1, 0x97, 0xa4, 0x75, 0x69, 0x70, 0x09, 0xdd, 0x3f, 0x98, 0x9f, 0xd8, 0x30, 0x20, + 0x38, 0x81, 0x69, 0xff, 0x2e, 0x1d, 0x3f, 0xbf, 0xd5, 0xf6, 0x3d, 0xe2, 0x45, 0xcb, 0xbe, 0xd7, + 0xe0, 0x12, 0x87, 0x97, 0x61, 0x20, 0xa2, 0xe3, 0xc1, 0xc7, 0xee, 0xa2, 0xdc, 0x28, 0x74, 0x14, + 0x0e, 0x0f, 0xe6, 0xcf, 0xa4, 0x6b, 0xb0, 0x71, 0x62, 0x75, 0xd0, 0xb7, 0xc0, 0x50, 0x18, 0x39, + 0x51, 0x27, 0x14, 0xa3, 0xf9, 0xb8, 0x1c, 0xcd, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x9f, 0x54, 0xd5, + 0x78, 0x11, 0x16, 0x15, 0xd0, 0x53, 0x30, 0xdc, 0x22, 0x61, 0xe8, 0x6c, 0xcb, 0xdb, 0x70, 0x52, + 0xd4, 0x1d, 0x5e, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0x13, 0x30, 0x48, 0x82, 0xc0, 0x0f, 0xc4, 0x1e, + 0x1d, 0x17, 0x88, 0x83, 0x2b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x98, 0x54, 0x7d, 0xe5, + 0x6d, 0x9d, 0xc0, 0xab, 0xe0, 0x53, 0x00, 0x75, 0xf9, 0x81, 0x21, 0xbb, 0x3d, 0x46, 0x9f, 0xbb, + 0x98, 0x79, 0x51, 0xa7, 0x86, 0x31, 0xa6, 0xac, 0x8a, 0x42, 0xac, 0x51, 0xb3, 0xff, 0x99, 0x05, + 0x33, 0x89, 0x2f, 0xba, 0xe1, 0x86, 0x11, 0x7a, 0x23, 0xf5, 0x55, 0x0b, 0xfd, 0x7d, 0x15, 0xad, + 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0x74, 0x23, 0xd2, 0x92, 0x1f, + 0xf3, 0x44, 0xd7, 0x8f, 0xe1, 0xbd, 0x8a, 0x67, 0xa4, 0x42, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, 0xd5, + 0x22, 0x94, 0xf8, 0xb2, 0x5d, 0x73, 0xda, 0x27, 0x30, 0x17, 0x4f, 0x43, 0xc9, 0x6d, 0xb5, 0x3a, + 0x91, 0xb3, 0x29, 0x8e, 0xf3, 0x11, 0xbe, 0xb5, 0x2a, 0xb2, 0x10, 0xc7, 0x70, 0x54, 0x81, 0x01, + 0xd6, 0x15, 0xfe, 0x95, 0x4f, 0x66, 0x7f, 0xa5, 0xe8, 0xfb, 0x42, 0xd9, 0x89, 0x1c, 0xce, 0x49, + 0xa9, 0x7b, 0x84, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0x9b, 0xae, 0xe7, 0x04, 0xfb, 0xb4, 0x6c, + 
0xb6, 0xc8, 0x08, 0x3e, 0xdb, 0x9d, 0xe0, 0x92, 0xc2, 0xe7, 0x64, 0xd5, 0x87, 0xc5, 0x00, 0xac, + 0x11, 0x9d, 0xfb, 0x08, 0x94, 0x14, 0xf2, 0x51, 0x18, 0xa2, 0xb9, 0x8f, 0xc1, 0x64, 0xa2, 0xad, + 0x5e, 0xd5, 0xc7, 0x74, 0x7e, 0xea, 0x97, 0xd8, 0x91, 0x21, 0x7a, 0xbd, 0xe2, 0xed, 0x89, 0x23, + 0xf7, 0x6d, 0x38, 0xd5, 0xcc, 0x38, 0xc9, 0xc4, 0xbc, 0xf6, 0x7f, 0xf2, 0x3d, 0x26, 0x3e, 0xfb, + 0x54, 0x16, 0x14, 0x67, 0xb6, 0x41, 0x79, 0x04, 0xbf, 0x4d, 0x37, 0x88, 0xd3, 0xd4, 0xd9, 0xed, + 0x9b, 0xa2, 0x0c, 0x2b, 0x28, 0x3d, 0xef, 0x4e, 0xa9, 0xce, 0x5f, 0x27, 0xfb, 0x35, 0xd2, 0x24, + 0xf5, 0xc8, 0x0f, 0xbe, 0xae, 0xdd, 0x3f, 0xc7, 0x47, 0x9f, 0x1f, 0x97, 0xa3, 0x82, 0x40, 0xf1, + 0x3a, 0xd9, 0xe7, 0x53, 0xa1, 0x7f, 0x5d, 0xb1, 0xeb, 0xd7, 0xfd, 0x8c, 0x05, 0xe3, 0xea, 0xeb, + 0x4e, 0xe0, 0x5c, 0x58, 0x32, 0xcf, 0x85, 0x73, 0x5d, 0x17, 0x78, 0xce, 0x89, 0xf0, 0x95, 0x02, + 0x9c, 0x55, 0x38, 0xf4, 0x6d, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x19, 0x4a, 0x9e, 0x92, 0x5a, 0x59, + 0xa6, 0xb8, 0x28, 0x96, 0x59, 0xc5, 0x38, 0x94, 0xc5, 0xf3, 0x62, 0xd1, 0xd2, 0x98, 0x2e, 0xce, + 0x15, 0xa2, 0xdb, 0x25, 0x28, 0x76, 0xdc, 0x86, 0xb8, 0x60, 0x3e, 0x24, 0x47, 0xfb, 0x56, 0xa5, + 0x7c, 0x78, 0x30, 0xff, 0x78, 0x9e, 0x2a, 0x81, 0xde, 0x6c, 0xe1, 0xc2, 0xad, 0x4a, 0x19, 0xd3, + 0xca, 0x68, 0x11, 0x26, 0xa5, 0xb6, 0xe4, 0x36, 0x65, 0xb7, 0x7c, 0x4f, 0xdc, 0x43, 0x4a, 0x26, + 0x8b, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, 0x32, 0x4c, 0xed, 0x76, 0x36, 0x49, 0x93, 0x44, 0xfc, 0x83, + 0xaf, 0x13, 0x2e, 0xb1, 0x2c, 0xc5, 0x2f, 0xb3, 0xeb, 0x09, 0x38, 0x4e, 0xd5, 0xb0, 0xff, 0x82, + 0xdd, 0x07, 0x62, 0xf4, 0xaa, 0x81, 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0xf5, 0x5c, 0xce, 0xfd, 0xac, + 0x8a, 0xeb, 0x64, 0x7f, 0xc3, 0xa7, 0x9c, 0x79, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xd7, 0x35, + 0xff, 0x73, 0x05, 0x38, 0xad, 0x46, 0xc0, 0x60, 0x02, 0xbf, 0xd1, 0xc7, 0xe0, 0x0a, 0x8c, 0x36, + 0xc8, 0x96, 0xd3, 0x69, 0x46, 0x4a, 0x7c, 0x3e, 0xc8, 0x55, 0x28, 0xe5, 0xb8, 0x18, 0xeb, 0x38, + 0x47, 0x18, 0xb6, 0xff, 0x3d, 0xca, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, + 0xd7, 0x3c, 0x01, 0x83, 0x6e, 0x8b, 0x32, 0x66, 0x05, 0x93, 0xdf, 0xaa, 0xd0, 0x42, 0xcc, 0x61, + 0xe8, 0x03, 0x30, 0x5c, 0xf7, 0x5b, 0x2d, 0xc7, 0x6b, 0xb0, 0x2b, 0xaf, 0xb4, 0x34, 0x4a, 0x79, + 0xb7, 0x65, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x03, 0x4e, 0xb0, 0xcd, 0x65, 0x18, 0xa5, 0xa5, + 0x11, 0xda, 0xd2, 0x62, 0xb0, 0x1d, 0x62, 0x56, 0x4a, 0x9f, 0x60, 0x77, 0xfd, 0x60, 0xd7, 0xf5, + 0xb6, 0xcb, 0x6e, 0x20, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x28, 0x08, 0xd6, 0xb0, 0xd0, 0x2a, 0x0c, + 0xb6, 0xfd, 0x20, 0x0a, 0x67, 0x87, 0xd8, 0x70, 0x3f, 0x9e, 0x73, 0x10, 0xf1, 0xaf, 0xad, 0xfa, + 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1d, 0xdd, 0x80, 0x61, 0xe2, 0xed, 0xad, 0x06, + 0x7e, 0x6b, 0x76, 0x26, 0x9f, 0xd2, 0x0a, 0x47, 0xe1, 0xcb, 0x2c, 0xe6, 0x51, 0x45, 0x31, 0x96, + 0x24, 0xd0, 0xb7, 0x40, 0x91, 0x78, 0x7b, 0xb3, 0xc3, 0x8c, 0xd2, 0x5c, 0x0e, 0xa5, 0xdb, 0x4e, + 0x10, 0x9f, 0xf9, 0x2b, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0x27, 0xa1, 0x24, 0x0f, 0x8c, 0x50, 0x08, + 0xeb, 0x32, 0x17, 0xac, 0x3c, 0x66, 0x30, 0x79, 0xab, 0xe3, 0x06, 0xa4, 0x45, 0xbc, 0x28, 0x8c, + 0x4f, 0x48, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x52, 0x4a, 0x88, 0xd7, 0xfc, 0x8e, 0x17, 0x85, + 0xb3, 0x25, 0xd6, 0xbd, 0x4c, 0xdd, 0xdd, 0xed, 0x18, 0x2f, 0x29, 0x42, 0xe6, 0x95, 0xb1, 0x41, + 0x0a, 0x7d, 0x1a, 0xc6, 0xf9, 0x7f, 0xae, 0x01, 0x0b, 0x67, 0x4f, 0x33, 0xda, 0x17, 0xf2, 0x69, + 0x73, 0xc4, 0xa5, 0xd3, 0x82, 0xf8, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x1a, 0xc2, 0x30, 0xde, 0x74, + 0xf7, 0x88, 0x47, 0xc2, 
0xb0, 0x1a, 0xf8, 0x9b, 0x64, 0x16, 0xd8, 0xc0, 0x9c, 0xcd, 0xd6, 0x98, + 0xf9, 0x9b, 0x64, 0x69, 0x9a, 0xd2, 0xbc, 0xa1, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x0b, 0x26, 0xe8, + 0x8b, 0xcd, 0x8d, 0x89, 0x8e, 0xf6, 0x22, 0xca, 0xde, 0x55, 0xd8, 0xa8, 0x84, 0x13, 0x44, 0xd0, + 0x4d, 0x18, 0x0b, 0x23, 0x27, 0x88, 0x3a, 0x6d, 0x4e, 0xf4, 0x4c, 0x2f, 0xa2, 0x4c, 0xe1, 0x5a, + 0xd3, 0xaa, 0x60, 0x83, 0x00, 0x7a, 0x0d, 0x4a, 0x4d, 0x77, 0x8b, 0xd4, 0xf7, 0xeb, 0x4d, 0x32, + 0x3b, 0xc6, 0xa8, 0x65, 0x1e, 0x2a, 0x37, 0x24, 0x12, 0xe7, 0x73, 0xd5, 0x5f, 0x1c, 0x57, 0x47, + 0xb7, 0xe1, 0x4c, 0x44, 0x82, 0x96, 0xeb, 0x39, 0xf4, 0x30, 0x10, 0x4f, 0x2b, 0xa6, 0xc8, 0x1c, + 0x67, 0xbb, 0xed, 0xbc, 0x98, 0x8d, 0x33, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x07, 0xb3, + 0x19, 0x10, 0xbf, 0xe9, 0xd6, 0xf7, 0x67, 0x4f, 0x31, 0xca, 0x1f, 0x15, 0x94, 0x67, 0x37, 0x72, + 0xf0, 0x0e, 0xbb, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x13, 0x26, 0xd9, 0x09, 0x54, 0xed, 0x34, 0x9b, + 0xa2, 0xc1, 0x09, 0xd6, 0xe0, 0x07, 0xe4, 0x7d, 0x5c, 0x31, 0xc1, 0x87, 0x07, 0xf3, 0x10, 0xff, + 0xc3, 0xc9, 0xda, 0x68, 0x93, 0xe9, 0xcc, 0x3a, 0x81, 0x1b, 0xed, 0xd3, 0x73, 0x83, 0xdc, 0x8b, + 0x66, 0x27, 0xbb, 0xca, 0x2b, 0x74, 0x54, 0xa5, 0x58, 0xd3, 0x0b, 0x71, 0x92, 0x20, 0x3d, 0x52, + 0xc3, 0xa8, 0xe1, 0x7a, 0xb3, 0x53, 0xfc, 0x5d, 0x22, 0x4f, 0xa4, 0x1a, 0x2d, 0xc4, 0x1c, 0xc6, + 0xf4, 0x65, 0xf4, 0xc7, 0x4d, 0x7a, 0x73, 0x4d, 0x33, 0xc4, 0x58, 0x5f, 0x26, 0x01, 0x38, 0xc6, + 0xa1, 0xcc, 0x64, 0x14, 0xed, 0xcf, 0x22, 0x86, 0xaa, 0x0e, 0x96, 0x8d, 0x8d, 0x4f, 0x62, 0x5a, + 0x6e, 0x6f, 0xc2, 0x84, 0x3a, 0x08, 0xd9, 0x98, 0xa0, 0x79, 0x18, 0x64, 0xec, 0x93, 0x90, 0xae, + 0x95, 0x68, 0x17, 0x18, 0x6b, 0x85, 0x79, 0x39, 0xeb, 0x82, 0xfb, 0x36, 0x59, 0xda, 0x8f, 0x08, + 0x7f, 0xd3, 0x17, 0xb5, 0x2e, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0xc7, 0xd9, 0xd0, 0xf8, 0xb4, + 0xed, 0xe3, 0x7e, 0x79, 0x06, 0x46, 0x76, 0xfc, 0x30, 0xa2, 0xd8, 0xac, 0x8d, 0xc1, 0x98, 0xf1, + 0xbc, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc0, 0x78, 0x5d, 0x6f, 0x40, 0x5c, 0x8e, 0xea, 0x18, + 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x09, 0x46, 0x98, 0x0d, 0x48, 0xdd, 0x6f, 0x0a, 0xae, 0x4d, + 0xde, 0xf0, 0x23, 0x55, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x84, 0x21, 0xda, 0x85, + 0x4a, 0x55, 0x5c, 0x4b, 0x4a, 0x50, 0x74, 0x8d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x5a, 0x41, 0x1b, + 0x65, 0xfa, 0x1e, 0x26, 0xa8, 0x0a, 0xc3, 0x77, 0x1d, 0x37, 0x72, 0xbd, 0x6d, 0xc1, 0x7f, 0x3c, + 0xd5, 0xf5, 0x8e, 0x62, 0x95, 0xee, 0xf0, 0x0a, 0xfc, 0x16, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, + 0x06, 0x1d, 0xcf, 0xa3, 0x14, 0x0b, 0xfd, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, + 0x06, 0xbd, 0x01, 0x20, 0x77, 0x18, 0x69, 0x08, 0xdb, 0x8b, 0x67, 0x7a, 0x13, 0xdd, 0x50, 0x75, + 0x96, 0x26, 0xe8, 0x1d, 0x1d, 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0x62, 0x7c, 0x5a, 0xba, 0x33, 0xe8, + 0xdb, 0xe8, 0x12, 0x77, 0x82, 0x88, 0x34, 0x16, 0x23, 0x31, 0x38, 0x1f, 0xec, 0xef, 0x91, 0xb2, + 0xe1, 0xb6, 0x88, 0xbe, 0x1d, 0x04, 0x11, 0x1c, 0xd3, 0xb3, 0x7f, 0xa1, 0x08, 0xb3, 0x79, 0xdd, + 0xa5, 0x8b, 0x8e, 0xdc, 0x73, 0xa3, 0x65, 0xca, 0x5e, 0x59, 0xe6, 0xa2, 0x5b, 0x11, 0xe5, 0x58, + 0x61, 0xd0, 0xd9, 0x0f, 0xdd, 0x6d, 0xf9, 0xc6, 0x1c, 0x8c, 0x67, 0xbf, 0xc6, 0x4a, 0xb1, 0x80, + 0x52, 0xbc, 0x80, 0x38, 0xa1, 0x30, 0xee, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x69, + 0xd7, 0x40, 0x0f, 0x69, 0x97, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x33, 0x00, 0x5b, 0xae, + 0xe7, 0x86, 0x3b, 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0xe6, 0x6c, 0x55, 0x51, 0xc1, 0x1a, 0x45, + 0xf4, 0x22, 0x8c, 0xaa, 0x0d, 0x58, 0x29, 0x33, 
0x4d, 0xa7, 0x66, 0x39, 0x12, 0x9f, 0x46, 0x65, + 0xac, 0xe3, 0xd9, 0x6f, 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0xfd, 0x8e, 0x6f, 0xa1, + 0xfb, 0xf8, 0xda, 0x5f, 0x2b, 0xc2, 0xa4, 0xd1, 0x58, 0x27, 0xec, 0xe3, 0xcc, 0xba, 0x4a, 0x0f, + 0x70, 0x27, 0x22, 0x62, 0xff, 0xd9, 0xbd, 0xb7, 0x8a, 0x7e, 0xc8, 0xd3, 0x1d, 0xc0, 0xeb, 0xa3, + 0xcf, 0x40, 0xa9, 0xe9, 0x84, 0x4c, 0x72, 0x46, 0xc4, 0xbe, 0xeb, 0x87, 0x58, 0xfc, 0x30, 0x71, + 0xc2, 0x48, 0xbb, 0x35, 0x39, 0xed, 0x98, 0x24, 0xbd, 0x69, 0x28, 0x7f, 0x22, 0xad, 0xc7, 0x54, + 0x27, 0x28, 0x13, 0xb3, 0x8f, 0x39, 0x0c, 0xbd, 0x04, 0x63, 0x01, 0x61, 0xab, 0x62, 0x99, 0x72, + 0x73, 0x6c, 0x99, 0x0d, 0xc6, 0x6c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0xdf, 0x06, 0x43, 0x5d, + 0xde, 0x06, 0x4f, 0xc1, 0x30, 0xfb, 0xa1, 0x56, 0x80, 0x9a, 0x8d, 0x0a, 0x2f, 0xc6, 0x12, 0x9e, + 0x5c, 0x30, 0x23, 0xfd, 0x2d, 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x69, 0x99, 0x47, 0xf8, 0x29, + 0x27, 0x96, 0x3c, 0x96, 0x30, 0xfb, 0x83, 0x30, 0x51, 0x76, 0x48, 0xcb, 0xf7, 0x56, 0xbc, 0x46, + 0xdb, 0x77, 0xbd, 0x08, 0xcd, 0xc2, 0x00, 0xbb, 0x44, 0xf8, 0x11, 0x30, 0x40, 0x1b, 0xc2, 0x03, + 0xf4, 0x41, 0x60, 0x6f, 0xc3, 0xe9, 0xb2, 0x7f, 0xd7, 0xbb, 0xeb, 0x04, 0x8d, 0xc5, 0x6a, 0x45, + 0x7b, 0x5f, 0xaf, 0xcb, 0xf7, 0x1d, 0x37, 0xda, 0xca, 0x3c, 0x7a, 0xb5, 0x9a, 0x9c, 0xad, 0x5d, + 0x75, 0x9b, 0x24, 0x47, 0x0a, 0xf2, 0x37, 0x0a, 0x46, 0x4b, 0x31, 0xbe, 0xd2, 0x6a, 0x59, 0xb9, + 0x5a, 0xad, 0xd7, 0x61, 0x64, 0xcb, 0x25, 0xcd, 0x06, 0x26, 0x5b, 0x62, 0x25, 0x3e, 0x99, 0x6f, + 0x87, 0xb2, 0x4a, 0x31, 0xa5, 0xd4, 0x8b, 0xbf, 0x0e, 0x57, 0x45, 0x65, 0xac, 0xc8, 0xa0, 0x5d, + 0x98, 0x92, 0x0f, 0x06, 0x09, 0x15, 0xeb, 0xf2, 0xa9, 0x6e, 0xaf, 0x10, 0x93, 0xf8, 0xa9, 0xfb, + 0x07, 0xf3, 0x53, 0x38, 0x41, 0x06, 0xa7, 0x08, 0xd3, 0xe7, 0x60, 0x8b, 0x9e, 0xc0, 0x03, 0x6c, + 0xf8, 0xd9, 0x73, 0x90, 0xbd, 0x6c, 0x59, 0xa9, 0xfd, 0x63, 0x16, 0x3c, 0x92, 0x1a, 0x19, 0xf1, + 0xc2, 0x3f, 0xe6, 0x59, 0x48, 0xbe, 0xb8, 0x0b, 0xbd, 0x5f, 0xdc, 0xf6, 0xdf, 0xb7, 0xe0, 0xd4, + 0x4a, 0xab, 0x1d, 0xed, 0x97, 0x5d, 0x53, 0x05, 0xf5, 0x11, 0x18, 0x6a, 0x91, 0x86, 0xdb, 0x69, + 0x89, 0x99, 0x9b, 0x97, 0xa7, 0xd4, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x1f, 0xaf, 0x45, 0x7e, 0xe0, + 0x6c, 0x13, 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xeb, 0xdd, 0xb7, 0xc9, 0x0d, 0xb7, 0xe5, 0x4a, 0xbb, + 0xa2, 0xae, 0x32, 0xbb, 0x05, 0x39, 0xa0, 0x0b, 0xaf, 0x77, 0x1c, 0x2f, 0x72, 0xa3, 0x7d, 0xa1, + 0x3d, 0x92, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0xaa, 0x05, 0x93, 0x72, 0xdd, 0x2f, 0x36, 0x1a, 0x01, + 0x09, 0x43, 0x34, 0x07, 0x05, 0xb7, 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x4a, 0x15, 0x17, 0xdc, + 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, 0x34, 0x15, 0x69, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x12, + 0x8c, 0x78, 0x7e, 0x83, 0xdb, 0x76, 0xf1, 0x2b, 0x8d, 0x2d, 0xb0, 0x75, 0x51, 0x86, 0x15, 0x14, + 0x55, 0xa1, 0xc4, 0xcd, 0x9e, 0xe2, 0x45, 0xdb, 0x97, 0xf1, 0x14, 0xfb, 0xb2, 0x0d, 0x59, 0x13, + 0xc7, 0x44, 0xec, 0x5f, 0xb1, 0x60, 0x4c, 0x7e, 0x59, 0x9f, 0x3c, 0x27, 0xdd, 0x5a, 0x31, 0xbf, + 0x19, 0x6f, 0x2d, 0xca, 0x33, 0x32, 0x88, 0xc1, 0x2a, 0x16, 0x8f, 0xc4, 0x2a, 0x5e, 0x81, 0x51, + 0xa7, 0xdd, 0xae, 0x9a, 0x7c, 0x26, 0x5b, 0x4a, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xec, 0x1f, 0x2d, + 0xc0, 0x84, 0xfc, 0x82, 0x5a, 0x67, 0x33, 0x24, 0x11, 0xda, 0x80, 0x92, 0xc3, 0x67, 0x89, 0xc8, + 0x45, 0xfe, 0x44, 0xb6, 0x1c, 0xc1, 0x98, 0xd2, 0xf8, 0xc2, 0x5f, 0x94, 0xb5, 0x71, 0x4c, 0x08, + 0x35, 0x61, 0xda, 0xf3, 0x23, 0x76, 0xf8, 0x2b, 0x78, 0x37, 0xd5, 0x4e, 0x92, 0xfa, 0x59, 0x41, + 0x7d, 0x7a, 0x3d, 0x49, 0x05, 0xa7, 0x09, 0xa3, 0x15, 0x29, 0x9b, 0x29, 
0xe6, 0x0b, 0x03, 0xf4, + 0x89, 0xcb, 0x16, 0xcd, 0xd8, 0xbf, 0x6c, 0x41, 0x49, 0xa2, 0x9d, 0x84, 0x16, 0x6f, 0x0d, 0x86, + 0x43, 0x36, 0x09, 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x15, 0xdf, 0x69, 0xfc, 0x7f, 0x88, + 0x25, 0x0d, 0x26, 0x9a, 0x57, 0xdd, 0x7f, 0x97, 0x88, 0xe6, 0x55, 0x7f, 0x72, 0x2e, 0xa5, 0x3f, + 0x64, 0x7d, 0xd6, 0x64, 0x5d, 0x94, 0xf5, 0x6a, 0x07, 0x64, 0xcb, 0xbd, 0x97, 0x64, 0xbd, 0xaa, + 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x03, 0xc6, 0xea, 0x52, 0x26, 0x1b, 0xef, 0xf0, 0x8b, 0x5d, 0xf5, + 0x03, 0x4a, 0x95, 0xc4, 0x65, 0x21, 0xcb, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xcd, 0x08, 0x8a, 0xbd, + 0xcc, 0x08, 0x62, 0xba, 0xf9, 0x4a, 0xf5, 0x1f, 0xb7, 0x60, 0x88, 0xcb, 0xe2, 0xfa, 0x13, 0x85, + 0x6a, 0x9a, 0xb5, 0x78, 0xec, 0x6e, 0xd3, 0x42, 0xa1, 0x29, 0x43, 0x6b, 0x50, 0x62, 0x3f, 0x98, + 0x2c, 0xb1, 0x98, 0x6f, 0x75, 0xcf, 0x5b, 0xd5, 0x3b, 0x78, 0x5b, 0x56, 0xc3, 0x31, 0x05, 0xfb, + 0x87, 0x8b, 0xf4, 0x74, 0x8b, 0x51, 0x8d, 0x4b, 0xdf, 0x7a, 0x78, 0x97, 0x7e, 0xe1, 0x61, 0x5d, + 0xfa, 0xdb, 0x30, 0x59, 0xd7, 0xf4, 0x70, 0xf1, 0x4c, 0x5e, 0xea, 0xba, 0x48, 0x34, 0x95, 0x1d, + 0x97, 0xb2, 0x2c, 0x9b, 0x44, 0x70, 0x92, 0x2a, 0xfa, 0x36, 0x18, 0xe3, 0xf3, 0x2c, 0x5a, 0xe1, + 0x96, 0x18, 0x1f, 0xc8, 0x5f, 0x2f, 0x7a, 0x13, 0x5c, 0x2a, 0xa7, 0x55, 0xc7, 0x06, 0x31, 0xfb, + 0x4f, 0x2c, 0x40, 0x2b, 0xed, 0x1d, 0xd2, 0x22, 0x81, 0xd3, 0x8c, 0xc5, 0xe9, 0xdf, 0x6f, 0xc1, + 0x2c, 0x49, 0x15, 0x2f, 0xfb, 0xad, 0x96, 0x78, 0xb4, 0xe4, 0xbc, 0xab, 0x57, 0x72, 0xea, 0x28, + 0xb7, 0x84, 0xd9, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x83, 0x19, 0x7e, 0x4b, 0x2a, 0x80, 0x66, + 0x7b, 0xfd, 0xa8, 0x20, 0x3c, 0xb3, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x1a, 0x83, 0xdc, + 0x5e, 0xbc, 0xa7, 0x47, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, 0xe1, 0x3d, 0x3d, 0xc2, 0x7b, 0x7a, + 0x84, 0x6f, 0x7a, 0x3d, 0xc2, 0x1f, 0x59, 0x30, 0x93, 0xbe, 0x06, 0x4e, 0x82, 0x31, 0xef, 0xc0, + 0x4c, 0xfa, 0xae, 0xeb, 0x6a, 0x67, 0x97, 0xee, 0x67, 0x7c, 0xef, 0x65, 0x7c, 0x03, 0xce, 0xa2, + 0x6f, 0xff, 0x9a, 0x05, 0xa7, 0x15, 0xb2, 0xf1, 0xd2, 0xff, 0x1c, 0xcc, 0xf0, 0xf3, 0x65, 0xb9, + 0xe9, 0xb8, 0xad, 0x0d, 0xd2, 0x6a, 0x37, 0x9d, 0x48, 0x9a, 0x19, 0x5c, 0xc9, 0xdc, 0xaa, 0x09, + 0x13, 0x5d, 0xa3, 0xe2, 0xd2, 0x23, 0xb4, 0x5f, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0x61, 0x94, 0x5a, + 0xe8, 0x69, 0x26, 0xfc, 0x0b, 0x23, 0x30, 0xb8, 0xb2, 0x47, 0xbc, 0xe8, 0x04, 0x26, 0xaa, 0x0e, + 0x13, 0xae, 0xb7, 0xe7, 0x37, 0xf7, 0x48, 0x83, 0xc3, 0x8f, 0xf2, 0xd0, 0x3f, 0x23, 0x48, 0x4f, + 0x54, 0x0c, 0x12, 0x38, 0x41, 0xf2, 0x61, 0x08, 0xdb, 0xaf, 0xc2, 0x10, 0xbf, 0xe3, 0x84, 0xa4, + 0x3d, 0xf3, 0x4a, 0x63, 0x83, 0x28, 0x6e, 0xee, 0x58, 0x11, 0xc0, 0xef, 0x50, 0x51, 0x1d, 0xbd, + 0x09, 0x13, 0x5b, 0x6e, 0x10, 0x46, 0x1b, 0x6e, 0x8b, 0x84, 0x91, 0xd3, 0x6a, 0x3f, 0x80, 0x70, + 0x5d, 0x8d, 0xc3, 0xaa, 0x41, 0x09, 0x27, 0x28, 0xa3, 0x6d, 0x18, 0x6f, 0x3a, 0x7a, 0x53, 0xc3, + 0x47, 0x6e, 0x4a, 0x5d, 0x9e, 0x37, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0x4f, 0x9b, 0x3a, 0x93, 0x0f, + 0x8f, 0x30, 0xa9, 0x89, 0x3a, 0x6d, 0xb8, 0x60, 0x98, 0xc3, 0x28, 0x1f, 0xc8, 0xec, 0x87, 0x4b, + 0x26, 0x1f, 0xa8, 0x59, 0x09, 0x7f, 0x16, 0x4a, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0xee, 0xdf, 0xcb, + 0xfd, 0xf5, 0x75, 0xcd, 0xad, 0x07, 0xbe, 0xa9, 0xd6, 0x58, 0x91, 0x94, 0x70, 0x4c, 0x14, 0x2d, + 0xc3, 0x50, 0x48, 0x02, 0x97, 0x84, 0xe2, 0x26, 0xee, 0x32, 0x8d, 0x0c, 0x8d, 0xbb, 0xde, 0xf0, + 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x12, 0x5f, 0x76, 0x57, 0x6a, 0xcb, 0x6b, 0x91, 0x95, + 0x62, 0x01, 0x45, 0xaf, 0xc1, 0x70, 0x40, 0x9a, 0x4c, 0x6f, 0x36, 0xde, 0xff, 0x22, 0xe7, 0x6a, + 
0x38, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, 0x80, 0x50, 0x3e, 0xd2, 0xf5, 0xb6, 0x95, 0x55, + 0xad, 0xb8, 0x87, 0xd4, 0xb9, 0x85, 0x63, 0x0c, 0xe9, 0x05, 0x85, 0x33, 0xaa, 0xa1, 0xab, 0x30, + 0xad, 0x4a, 0x2b, 0x5e, 0x18, 0x39, 0xf4, 0xfc, 0x9f, 0x64, 0xb4, 0x94, 0x18, 0x07, 0x27, 0x11, + 0x70, 0xba, 0x8e, 0xfd, 0x25, 0x0b, 0xf8, 0x38, 0x9f, 0x80, 0xf0, 0xe2, 0x55, 0x53, 0x78, 0x71, + 0x36, 0x77, 0xe6, 0x72, 0x04, 0x17, 0x5f, 0xb2, 0x60, 0x54, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x5d, + 0xd6, 0x6c, 0x07, 0xa6, 0xe8, 0x4a, 0xbf, 0xb9, 0x19, 0x92, 0x60, 0x8f, 0x34, 0xd8, 0xc2, 0x2c, + 0x3c, 0xd8, 0xc2, 0x54, 0x16, 0x7c, 0x37, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0xb3, 0xb2, 0xab, + 0xca, 0xe0, 0xb1, 0xae, 0xe6, 0x3c, 0x61, 0xf0, 0xa8, 0x66, 0x15, 0xc7, 0x38, 0x74, 0xab, 0xed, + 0xf8, 0x61, 0x94, 0x34, 0x78, 0xbc, 0xe6, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x79, 0x80, 0x95, 0x7b, + 0xa4, 0xce, 0x57, 0xac, 0xfe, 0xb6, 0xb2, 0xf2, 0xdf, 0x56, 0xf6, 0x6f, 0x59, 0x30, 0xb1, 0xba, + 0x6c, 0xdc, 0x73, 0x0b, 0x00, 0xfc, 0x41, 0x78, 0xe7, 0xce, 0xba, 0xb4, 0x16, 0xe0, 0x0a, 0x5f, + 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x16, 0x8a, 0xcd, 0x8e, 0x27, 0xa4, 0xab, 0xc3, 0x94, 0x7b, 0xb8, + 0xd1, 0xf1, 0x30, 0x2d, 0xd3, 0x3c, 0x2e, 0x8a, 0x7d, 0x7b, 0x5c, 0xf4, 0x8c, 0x7c, 0x80, 0xe6, + 0x61, 0xf0, 0xee, 0x5d, 0xb7, 0xc1, 0xfd, 0x4b, 0x85, 0x25, 0xc3, 0x9d, 0x3b, 0x95, 0x72, 0x88, + 0x79, 0xb9, 0xfd, 0xc5, 0x22, 0xcc, 0xad, 0x36, 0xc9, 0xbd, 0x77, 0xe8, 0x63, 0xdb, 0xaf, 0xbf, + 0xc8, 0xd1, 0xe4, 0x54, 0x47, 0xf5, 0x09, 0xea, 0x3d, 0x1e, 0x5b, 0x30, 0xcc, 0xed, 0xfd, 0xa4, + 0xc7, 0xed, 0x2b, 0x59, 0xad, 0xe7, 0x0f, 0xc8, 0x02, 0xb7, 0x1b, 0x14, 0x0e, 0x83, 0xea, 0xc2, + 0x14, 0xa5, 0x58, 0x12, 0x9f, 0x7b, 0x19, 0xc6, 0x74, 0xcc, 0x23, 0x79, 0xe7, 0xfd, 0x95, 0x22, + 0x4c, 0xd1, 0x1e, 0x3c, 0xd4, 0x89, 0xb8, 0x95, 0x9e, 0x88, 0xe3, 0xf6, 0xd0, 0xea, 0x3d, 0x1b, + 0x6f, 0x24, 0x67, 0xe3, 0x4a, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0xa7, 0x05, 0x33, 0xab, 0x4d, + 0xbf, 0xbe, 0x9b, 0xf0, 0xa2, 0x7a, 0x11, 0x46, 0xe9, 0x71, 0x1c, 0x1a, 0x0e, 0xfe, 0x46, 0xc8, + 0x07, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xd6, 0xad, 0x4a, 0x39, 0x2b, 0x52, 0x84, 0x00, 0x61, + 0x1d, 0xcf, 0xfe, 0x0d, 0x0b, 0xce, 0x5d, 0x5d, 0x5e, 0x89, 0x97, 0x62, 0x2a, 0x58, 0xc5, 0x45, + 0x18, 0x6a, 0x37, 0xb4, 0xae, 0xc4, 0xd2, 0xe7, 0x32, 0xeb, 0x85, 0x80, 0xbe, 0x5b, 0x02, 0xb1, + 0xfc, 0xb4, 0x05, 0x33, 0x57, 0xdd, 0x88, 0xde, 0xae, 0xc9, 0xb0, 0x09, 0xf4, 0x7a, 0x0d, 0xdd, + 0xc8, 0x0f, 0xf6, 0x93, 0x61, 0x13, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xe7, 0x32, 0x4b, + 0xf3, 0x82, 0xa9, 0x87, 0xc3, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0x0d, 0x37, 0x60, 0x22, 0xcc, + 0x7d, 0x71, 0xc2, 0xaa, 0x0f, 0x2b, 0x4b, 0x00, 0x8e, 0x71, 0xe8, 0x6b, 0x6e, 0xfe, 0x6a, 0xb3, + 0x13, 0x46, 0x24, 0xd8, 0x0a, 0x73, 0x4e, 0xc7, 0xe7, 0xa1, 0x44, 0xa4, 0xc2, 0x40, 0xf4, 0x5a, + 0x71, 0x8c, 0x4a, 0x93, 0xc0, 0xa3, 0x37, 0x28, 0xbc, 0x3e, 0x7c, 0x32, 0x8f, 0xe6, 0x54, 0xb7, + 0x0a, 0x88, 0xe8, 0x6d, 0xe9, 0xe1, 0x2c, 0x98, 0x5f, 0xfc, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, + 0x3f, 0x66, 0xc1, 0x69, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0x7f, 0xb6, 0x00, 0xe3, 0xd7, 0x36, + 0x36, 0xaa, 0x57, 0x49, 0x24, 0xae, 0xed, 0xde, 0x66, 0x00, 0x58, 0xd3, 0x66, 0x76, 0x7b, 0xcc, + 0x75, 0x22, 0xb7, 0xb9, 0xc0, 0xa3, 0x22, 0x2d, 0x54, 0xbc, 0xe8, 0x66, 0x50, 0x8b, 0x02, 0xd7, + 0xdb, 0xce, 0xd4, 0x7f, 0x4a, 0xe6, 0xa2, 0x98, 0xc7, 0x5c, 0xa0, 0xe7, 0x61, 0x88, 0x85, 0x65, + 0x92, 0x93, 0xf0, 0xa8, 0x7a, 0x0b, 0xb1, 0xd2, 0xc3, 0x83, 0xf9, 0xd2, 0x2d, 0x5c, 0xe1, 0x7f, + 0xb0, 0x40, 0x45, 0xb7, 
0x60, 0x74, 0x27, 0x8a, 0xda, 0xd7, 0x88, 0xd3, 0xa0, 0x4f, 0x77, 0x7e, + 0x1c, 0x9e, 0xcf, 0x3a, 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xf8, 0x04, 0x89, 0xcb, 0x42, 0xac, 0xd3, + 0xb1, 0x6b, 0x00, 0x31, 0xec, 0x98, 0x14, 0x39, 0xf6, 0x1f, 0x58, 0x30, 0xcc, 0x23, 0x64, 0x04, + 0xe8, 0xa3, 0x30, 0x40, 0xee, 0x91, 0xba, 0xe0, 0x78, 0x33, 0x3b, 0x1c, 0x73, 0x5a, 0x5c, 0x20, + 0x4d, 0xff, 0x63, 0x56, 0x0b, 0x5d, 0x83, 0x61, 0xda, 0xdb, 0xab, 0x2a, 0x5c, 0xc8, 0xe3, 0x79, + 0x5f, 0xac, 0xa6, 0x9d, 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0x7b, 0x5e, 0x6f, 0xd7, 0xe8, + 0x89, 0x1d, 0x75, 0x63, 0x2c, 0x36, 0x96, 0xab, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x73, 0x59, 0x88, + 0x63, 0x22, 0xf6, 0x06, 0x94, 0xe8, 0xa4, 0x2e, 0x36, 0x5d, 0xa7, 0xbb, 0x41, 0xc0, 0xd3, 0x50, + 0x92, 0xea, 0xfe, 0x50, 0x78, 0xc6, 0x33, 0xaa, 0xd2, 0x1a, 0x20, 0xc4, 0x31, 0xdc, 0xde, 0x82, + 0x53, 0xcc, 0x78, 0xd3, 0x89, 0x76, 0x8c, 0x3d, 0xd6, 0x7b, 0x31, 0x3f, 0x23, 0x1e, 0x90, 0x7c, + 0x66, 0x66, 0x35, 0xe7, 0xd3, 0x31, 0x49, 0x31, 0x7e, 0x4c, 0xda, 0x5f, 0x1b, 0x80, 0x47, 0x2b, + 0xb5, 0xfc, 0xe0, 0x29, 0x2f, 0xc1, 0x18, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0x25, + 0x89, 0xde, 0xd0, 0x60, 0xd8, 0xc0, 0x44, 0xe7, 0xa0, 0xe8, 0xbe, 0xe5, 0x25, 0x5d, 0xb3, 0x2a, + 0xaf, 0xaf, 0x63, 0x5a, 0x4e, 0xc1, 0x94, 0xc5, 0xe5, 0x77, 0x87, 0x02, 0x2b, 0x36, 0xf7, 0x55, + 0x98, 0x70, 0xc3, 0x7a, 0xe8, 0x56, 0x3c, 0x7a, 0xce, 0x68, 0x27, 0x95, 0x12, 0x6e, 0xd0, 0x4e, + 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0x17, 0xd9, 0x60, 0xdf, 0x6c, 0x72, 0x4f, 0x57, 0x71, 0xfa, 0x02, + 0x68, 0xb3, 0xaf, 0x0b, 0x99, 0x4a, 0x41, 0xbc, 0x00, 0xf8, 0x07, 0x87, 0x58, 0xc2, 0xe8, 0xcb, + 0xb1, 0xbe, 0xe3, 0xb4, 0x17, 0x3b, 0xd1, 0x4e, 0xd9, 0x0d, 0xeb, 0xfe, 0x1e, 0x09, 0xf6, 0xd9, + 0xa3, 0x7f, 0x24, 0x7e, 0x39, 0x2a, 0xc0, 0xf2, 0xb5, 0xc5, 0x2a, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, + 0x11, 0x26, 0x65, 0x61, 0x8d, 0x84, 0xec, 0x0a, 0x1b, 0x65, 0x64, 0x94, 0xb3, 0x94, 0x28, 0x56, + 0x44, 0x92, 0xf8, 0x26, 0x27, 0x0d, 0xc7, 0xc1, 0x49, 0x7f, 0x04, 0xc6, 0x5d, 0xcf, 0x8d, 0x5c, + 0x27, 0xf2, 0xb9, 0x3e, 0x8c, 0xbf, 0xef, 0x99, 0xa0, 0xbf, 0xa2, 0x03, 0xb0, 0x89, 0x67, 0xff, + 0xb7, 0x01, 0x98, 0x66, 0xd3, 0xf6, 0xde, 0x0a, 0xfb, 0x66, 0x5a, 0x61, 0xb7, 0xd2, 0x2b, 0xec, + 0x38, 0x9e, 0x08, 0x0f, 0xbc, 0xcc, 0xde, 0x84, 0x92, 0xf2, 0x0f, 0x93, 0x0e, 0xa2, 0x56, 0x8e, + 0x83, 0x68, 0x6f, 0xee, 0x43, 0x9a, 0xd8, 0x15, 0x33, 0x4d, 0xec, 0xfe, 0x96, 0x05, 0xb1, 0x82, + 0x07, 0x5d, 0x83, 0x52, 0xdb, 0x67, 0x96, 0xa3, 0x81, 0x34, 0xc7, 0x7e, 0x34, 0xf3, 0xa2, 0xe2, + 0x97, 0x22, 0xff, 0xf8, 0xaa, 0xac, 0x81, 0xe3, 0xca, 0x68, 0x09, 0x86, 0xdb, 0x01, 0xa9, 0x45, + 0x2c, 0x86, 0x4a, 0x4f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0x3f, 0x67, 0x01, 0x70, + 0x2b, 0x36, 0xc7, 0xdb, 0x26, 0x27, 0x20, 0xb5, 0x2e, 0xc3, 0x40, 0xd8, 0x26, 0xf5, 0x6e, 0x36, + 0xbd, 0x71, 0x7f, 0x6a, 0x6d, 0x52, 0x8f, 0x07, 0x9c, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xdd, 0x00, + 0x13, 0x31, 0x5a, 0x25, 0x22, 0x2d, 0xf4, 0xac, 0x11, 0x53, 0xe1, 0x6c, 0x22, 0xa6, 0x42, 0x89, + 0x61, 0x6b, 0x02, 0xd2, 0x37, 0xa1, 0xd8, 0x72, 0xee, 0x09, 0x09, 0xd8, 0xd3, 0xdd, 0xbb, 0x41, + 0xe9, 0x2f, 0xac, 0x39, 0xf7, 0xf8, 0x23, 0xf1, 0x69, 0xb9, 0x40, 0xd6, 0x9c, 0x7b, 0x87, 0xdc, + 0x72, 0x97, 0x1d, 0x52, 0x37, 0xdc, 0x30, 0xfa, 0xfc, 0x7f, 0x8d, 0xff, 0xb3, 0x65, 0x47, 0x1b, + 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0xd0, 0xea, 0xab, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x7d, 0xb4, + 0xe5, 0x7a, 0xe8, 0x6d, 0x18, 0x16, 0xf6, 0x93, 0x22, 0x86, 0xd1, 0xe5, 0x3e, 0xda, 0x13, 0xe6, + 0x97, 0xbc, 0xcd, 0xcb, 0xf2, 0x11, 0x2c, 0x4a, 
0x7b, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xdd, 0x82, + 0x09, 0xf1, 0x1b, 0x93, 0xb7, 0x3a, 0x24, 0x8c, 0x04, 0xef, 0xf9, 0xe1, 0xfe, 0xfb, 0x20, 0x2a, + 0xf2, 0xae, 0x7c, 0x58, 0x1e, 0xb3, 0x26, 0xb0, 0x67, 0x8f, 0x12, 0xbd, 0x40, 0xff, 0xd0, 0x82, + 0x53, 0x2d, 0xe7, 0x1e, 0x6f, 0x91, 0x97, 0x61, 0x27, 0x72, 0x7d, 0x61, 0x87, 0xf0, 0xd1, 0xfe, + 0xa6, 0x3f, 0x55, 0x9d, 0x77, 0x52, 0x2a, 0x4b, 0x4f, 0x65, 0xa1, 0xf4, 0xec, 0x6a, 0x66, 0xbf, + 0xe6, 0xb6, 0x60, 0x44, 0xae, 0xb7, 0x0c, 0x51, 0x43, 0x59, 0x67, 0xac, 0x8f, 0x6c, 0xbe, 0xaa, + 0xc7, 0x2a, 0xa0, 0xed, 0x88, 0xb5, 0xf6, 0x50, 0xdb, 0x79, 0x13, 0xc6, 0xf4, 0x35, 0xf6, 0x50, + 0xdb, 0x7a, 0x0b, 0x66, 0x32, 0xd6, 0xd2, 0x43, 0x6d, 0xf2, 0x2e, 0x9c, 0xcd, 0x5d, 0x1f, 0x0f, + 0xb3, 0x61, 0xfb, 0x67, 0x2d, 0xfd, 0x1c, 0x3c, 0x01, 0xd5, 0xc1, 0xb2, 0xa9, 0x3a, 0x38, 0xdf, + 0x7d, 0xe7, 0xe4, 0xe8, 0x0f, 0xde, 0xd0, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x1a, 0x0c, 0x35, 0x69, + 0x89, 0xb4, 0xc2, 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x97, 0x62, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, + 0xb4, 0x60, 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x6c, 0x2e, 0x69, 0x11, 0x5e, 0x79, 0x01, + 0x3b, 0x77, 0x57, 0xee, 0x45, 0xc4, 0x0b, 0xd9, 0x53, 0x31, 0x73, 0x60, 0x7e, 0xd2, 0x82, 0x99, + 0x1b, 0xbe, 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x27, 0x41, 0xc5, 0xdb, 0x3e, 0x92, 0x09, 0x79, + 0xa1, 0xa7, 0x09, 0xf9, 0xb2, 0xb4, 0xc0, 0x1a, 0xc8, 0x9f, 0x3f, 0xca, 0x48, 0x26, 0xa3, 0xcc, + 0x18, 0xb6, 0xc2, 0x3b, 0x80, 0xf4, 0x5e, 0x0a, 0x87, 0x1e, 0x0c, 0xc3, 0x2e, 0xef, 0xaf, 0x98, + 0xc4, 0x27, 0xb3, 0x19, 0xbc, 0xd4, 0xe7, 0x69, 0xae, 0x2a, 0xbc, 0x00, 0x4b, 0x42, 0xf6, 0x4b, + 0x90, 0x19, 0x15, 0xa0, 0xb7, 0xf0, 0xc1, 0xfe, 0x24, 0x4c, 0xb3, 0x9a, 0x47, 0x7c, 0x18, 0xdb, + 0x09, 0xd9, 0x66, 0x46, 0xbc, 0x40, 0xfb, 0x0b, 0x16, 0x4c, 0xae, 0x27, 0xc2, 0xa8, 0x5d, 0x64, + 0xda, 0xd0, 0x0c, 0x91, 0x7a, 0x8d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x49, 0xd6, 0x5f, 0x58, 0x10, + 0x07, 0xea, 0x38, 0x01, 0xf6, 0x6d, 0xd9, 0x60, 0xdf, 0x32, 0x25, 0x2c, 0xaa, 0x3b, 0x79, 0xdc, + 0x1b, 0xba, 0xae, 0x42, 0x58, 0x75, 0x11, 0xae, 0xc4, 0x64, 0xf8, 0x52, 0x9c, 0x30, 0xe3, 0x5c, + 0xc9, 0xa0, 0x56, 0xf6, 0x6f, 0x17, 0x00, 0x29, 0xdc, 0xbe, 0x43, 0x6c, 0xa5, 0x6b, 0x1c, 0x4f, + 0x88, 0xad, 0x3d, 0x40, 0x4c, 0x9f, 0x1f, 0x38, 0x5e, 0xc8, 0xc9, 0xba, 0x42, 0x76, 0x77, 0x34, + 0x63, 0x81, 0x39, 0xd1, 0x24, 0xba, 0x91, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xec, 0x34, 0x06, 0xfb, + 0xb5, 0xd3, 0x18, 0xea, 0xe1, 0xb4, 0xf7, 0x33, 0x16, 0x8c, 0xab, 0x61, 0x7a, 0x97, 0x98, 0xd4, + 0xab, 0xfe, 0xe4, 0x1c, 0xa0, 0x55, 0xad, 0xcb, 0xec, 0x62, 0xf9, 0x56, 0xe6, 0x7c, 0xe9, 0x34, + 0xdd, 0xb7, 0x89, 0x0a, 0x70, 0x38, 0x2f, 0x9c, 0x29, 0x45, 0xe9, 0xe1, 0xc1, 0xfc, 0xb8, 0xfa, + 0xc7, 0x03, 0x2a, 0xc7, 0x55, 0xe8, 0x91, 0x3c, 0x99, 0x58, 0x8a, 0xe8, 0x45, 0x18, 0x6c, 0xef, + 0x38, 0x21, 0x49, 0xb8, 0x1e, 0x0d, 0x56, 0x69, 0xe1, 0xe1, 0xc1, 0xfc, 0x84, 0xaa, 0xc0, 0x4a, + 0x30, 0xc7, 0xee, 0x3f, 0x70, 0x59, 0x7a, 0x71, 0xf6, 0x0c, 0x5c, 0xf6, 0x27, 0x16, 0x0c, 0xac, + 0xfb, 0x8d, 0x93, 0x38, 0x02, 0x5e, 0x35, 0x8e, 0x80, 0xc7, 0xf2, 0x62, 0xdd, 0xe7, 0xee, 0xfe, + 0xd5, 0xc4, 0xee, 0x3f, 0x9f, 0x4b, 0xa1, 0xfb, 0xc6, 0x6f, 0xc1, 0x28, 0x8b, 0xa0, 0x2f, 0xdc, + 0xac, 0x9e, 0x37, 0x36, 0xfc, 0x7c, 0x62, 0xc3, 0x4f, 0x6a, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0x86, + 0x85, 0xdf, 0x4e, 0xd2, 0x87, 0x55, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xbc, 0x08, 0x46, 0xc4, 0x7e, + 0xf4, 0xcb, 0x16, 0x2c, 0x04, 0xdc, 0x9e, 0xb7, 0x51, 0xee, 0x04, 0xae, 0xb7, 0x5d, 0xab, 0xef, + 0x90, 0x46, 0xa7, 0xe9, 0x7a, 0xdb, 0x95, 0x6d, 0xcf, 0x57, 0xc5, 0x2b, 
0xf7, 0x48, 0xbd, 0xc3, + 0x94, 0x60, 0x3d, 0xd2, 0x03, 0x28, 0xbb, 0xf8, 0xe7, 0xee, 0x1f, 0xcc, 0x2f, 0xe0, 0x23, 0xd1, + 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0x2e, 0xf3, 0x40, 0xf6, 0xfd, 0xf7, 0xbf, 0xcb, 0x6b, + 0xb9, 0x2a, 0x49, 0xc5, 0x44, 0x36, 0x48, 0xd0, 0x5a, 0xfa, 0x88, 0x18, 0xd0, 0xcb, 0xd5, 0xa3, + 0xb5, 0x85, 0x8f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x5c, 0x04, 0xb8, 0x12, 0x77, 0xc0, 0x8b, + 0xc6, 0x92, 0x78, 0x3c, 0xb1, 0x24, 0xa6, 0x0d, 0xe4, 0xe3, 0x39, 0xfe, 0x43, 0x98, 0xa6, 0x87, + 0xf3, 0x35, 0xe2, 0x04, 0xd1, 0x26, 0x71, 0xb8, 0xf9, 0x55, 0xf1, 0xc8, 0xa7, 0xbf, 0x12, 0xcf, + 0xdd, 0x48, 0x12, 0xc3, 0x69, 0xfa, 0xdf, 0x4c, 0x77, 0x8e, 0x07, 0x53, 0xa9, 0x18, 0x65, 0x9f, + 0x82, 0x92, 0x72, 0x3a, 0x11, 0x87, 0x4e, 0xf7, 0x50, 0x7f, 0x49, 0x0a, 0x5c, 0x84, 0x16, 0x3b, + 0x3c, 0xc5, 0xe4, 0xec, 0x7f, 0x54, 0x30, 0x1a, 0xe4, 0x93, 0xb8, 0x0e, 0x23, 0x4e, 0x18, 0xba, + 0xdb, 0x1e, 0x69, 0x88, 0x1d, 0xfb, 0xfe, 0xbc, 0x1d, 0x6b, 0x34, 0xc3, 0x1c, 0x7f, 0x16, 0x45, + 0x4d, 0xac, 0x68, 0xa0, 0x6b, 0xdc, 0xc8, 0x6d, 0x4f, 0xbe, 0xf7, 0xfa, 0xa3, 0x06, 0xd2, 0x0c, + 0x6e, 0x8f, 0x60, 0x51, 0x1f, 0x7d, 0x9a, 0x5b, 0x21, 0x5e, 0xf7, 0xfc, 0xbb, 0xde, 0x55, 0xdf, + 0x97, 0x41, 0x24, 0xfa, 0x23, 0x38, 0x2d, 0x6d, 0x0f, 0x55, 0x75, 0x6c, 0x52, 0xeb, 0x2f, 0xe8, + 0xe7, 0xe7, 0x60, 0x86, 0x92, 0x36, 0x7d, 0xbc, 0x43, 0x44, 0x60, 0x52, 0x44, 0x4f, 0x93, 0x65, + 0x62, 0xec, 0x32, 0x9f, 0x72, 0x66, 0xed, 0x58, 0x8e, 0x7c, 0xdd, 0x24, 0x81, 0x93, 0x34, 0xed, + 0x9f, 0xb2, 0x80, 0xf9, 0xbb, 0x9e, 0x00, 0x3f, 0xf2, 0x31, 0x93, 0x1f, 0x99, 0xcd, 0x1b, 0xe4, + 0x1c, 0x56, 0xe4, 0x05, 0xbe, 0xb2, 0xaa, 0x81, 0x7f, 0x6f, 0x5f, 0x98, 0x8e, 0xf4, 0x7e, 0x7f, + 0xd8, 0xff, 0xd7, 0xe2, 0x87, 0x98, 0x72, 0x09, 0x41, 0xdf, 0x0e, 0x23, 0x75, 0xa7, 0xed, 0xd4, + 0x79, 0x7a, 0x99, 0x5c, 0x89, 0x9e, 0x51, 0x69, 0x61, 0x59, 0xd4, 0xe0, 0x12, 0x2a, 0x19, 0x85, + 0x6f, 0x44, 0x16, 0xf7, 0x94, 0x4a, 0xa9, 0x26, 0xe7, 0x76, 0x61, 0xdc, 0x20, 0xf6, 0x50, 0xc5, + 0x19, 0xdf, 0xce, 0xaf, 0x58, 0x15, 0x35, 0xb2, 0x05, 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0xf2, + 0x71, 0xf9, 0xfe, 0x5e, 0x97, 0x28, 0xbb, 0x7d, 0x34, 0x57, 0xda, 0x04, 0x19, 0x9c, 0xa6, 0x6c, + 0xff, 0x84, 0x05, 0x8f, 0xe8, 0x88, 0x9a, 0xb7, 0x4e, 0x2f, 0x1d, 0x41, 0x19, 0x46, 0xfc, 0x36, + 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x49, 0x0e, 0xfa, 0x4d, 0x51, 0x7e, 0x28, 0x82, 0xb3, + 0x4b, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xf4, 0xf5, 0xc9, 0x06, 0x23, 0x14, 0x7e, 0x59, 0xec, 0x0c, + 0x60, 0xea, 0xf2, 0x10, 0x0b, 0x88, 0xfd, 0x35, 0x8b, 0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x2d, 0x98, + 0x6a, 0x39, 0x51, 0x7d, 0x67, 0xe5, 0x5e, 0x3b, 0xe0, 0x1a, 0x17, 0x39, 0x4e, 0x4f, 0xf7, 0x1a, + 0x27, 0xed, 0x23, 0x63, 0xc3, 0xca, 0xb5, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x36, 0x61, 0x94, 0x95, + 0x31, 0x97, 0xc3, 0xb0, 0x1b, 0x6b, 0x90, 0xd7, 0x9a, 0xb2, 0x38, 0x58, 0x8b, 0xe9, 0x60, 0x9d, + 0xa8, 0xfd, 0xe5, 0x22, 0xdf, 0xed, 0x8c, 0x95, 0x7f, 0x0a, 0x86, 0xdb, 0x7e, 0x63, 0xb9, 0x52, + 0xc6, 0x62, 0x16, 0xd4, 0x35, 0x52, 0xe5, 0xc5, 0x58, 0xc2, 0xd1, 0x25, 0x18, 0x11, 0x3f, 0xa5, + 0x86, 0x8c, 0x9d, 0xcd, 0x02, 0x2f, 0xc4, 0x0a, 0x8a, 0x9e, 0x03, 0x68, 0x07, 0xfe, 0x9e, 0xdb, + 0x60, 0xa1, 0x30, 0x8a, 0xa6, 0xb1, 0x50, 0x55, 0x41, 0xb0, 0x86, 0x85, 0x5e, 0x81, 0xf1, 0x8e, + 0x17, 0x72, 0x76, 0x44, 0x0b, 0x7c, 0xab, 0xcc, 0x58, 0x6e, 0xe9, 0x40, 0x6c, 0xe2, 0xa2, 0x45, + 0x18, 0x8a, 0x1c, 0x66, 0xfc, 0x32, 0x98, 0x6f, 0x7c, 0xbb, 0x41, 0x31, 0xf4, 0x4c, 0x26, 0xb4, + 0x02, 0x16, 0x15, 0xd1, 0xa7, 0xa4, 0xf7, 0x2f, 0x3f, 0xd8, 0x85, 0xd5, 0x7b, 0x7f, 0x97, 0x80, + 
0xe6, 0xfb, 0x2b, 0xac, 0xe9, 0x0d, 0x5a, 0xe8, 0x65, 0x00, 0x72, 0x2f, 0x22, 0x81, 0xe7, 0x34, + 0x95, 0x6d, 0x99, 0xe2, 0x0b, 0xca, 0xfe, 0xba, 0x1f, 0xdd, 0x0a, 0xc9, 0x8a, 0xc2, 0xc0, 0x1a, + 0xb6, 0xfd, 0x1b, 0x25, 0x80, 0x98, 0x6f, 0x47, 0x6f, 0xa7, 0x0e, 0xae, 0x67, 0xba, 0x73, 0xfa, + 0xc7, 0x77, 0x6a, 0xa1, 0xef, 0xb1, 0x60, 0xd4, 0x69, 0x36, 0xfd, 0xba, 0xc3, 0x43, 0x13, 0x17, + 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xc5, 0xb8, 0x06, 0xef, 0xc2, 0xf3, 0x72, 0x85, 0x6a, 0x90, 0x9e, + 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x92, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea, 0xa9, 0x58, 0x62, 0x77, + 0x84, 0xfe, 0x4a, 0xbc, 0x65, 0xbc, 0x12, 0x07, 0xf2, 0xdd, 0x1b, 0x0d, 0xf6, 0xb5, 0xd7, 0x03, + 0x11, 0x55, 0xf5, 0x50, 0x07, 0x83, 0xf9, 0xbe, 0x84, 0xda, 0x3b, 0xa9, 0x47, 0x98, 0x83, 0x37, + 0x61, 0xb2, 0x61, 0x32, 0x01, 0x62, 0x25, 0x3e, 0x99, 0x47, 0x37, 0xc1, 0x33, 0xc4, 0xd7, 0x7e, + 0x02, 0x80, 0x93, 0x84, 0x51, 0x95, 0x47, 0xbe, 0xa8, 0x78, 0x5b, 0xbe, 0xf0, 0xbc, 0xb0, 0x73, + 0xe7, 0x72, 0x3f, 0x8c, 0x48, 0x8b, 0x62, 0xc6, 0xb7, 0xfb, 0xba, 0xa8, 0x8b, 0x15, 0x15, 0xf4, + 0x1a, 0x0c, 0x31, 0x67, 0xb2, 0x70, 0x76, 0x24, 0x5f, 0xe2, 0x6c, 0x86, 0x72, 0x8b, 0x37, 0x24, + 0xfb, 0x1b, 0x62, 0x41, 0x01, 0x5d, 0x93, 0xae, 0x9a, 0x61, 0xc5, 0xbb, 0x15, 0x12, 0xe6, 0xaa, + 0x59, 0x5a, 0x7a, 0x7f, 0xec, 0x85, 0xc9, 0xcb, 0x33, 0xf3, 0x9d, 0x19, 0x35, 0x29, 0x17, 0x25, + 0xfe, 0xcb, 0x34, 0x6a, 0xb3, 0x90, 0xdf, 0x3d, 0x33, 0xd5, 0x5a, 0x3c, 0x9c, 0xb7, 0x4d, 0x12, + 0x38, 0x49, 0x93, 0x72, 0xa4, 0x7c, 0xd7, 0x0b, 0xdf, 0x8d, 0x5e, 0x67, 0x07, 0x7f, 0x88, 0xb3, + 0xdb, 0x88, 0x97, 0x60, 0x51, 0xff, 0x44, 0xd9, 0x83, 0x39, 0x0f, 0xa6, 0x92, 0x5b, 0xf4, 0xa1, + 0xb2, 0x23, 0x7f, 0x30, 0x00, 0x13, 0xe6, 0x92, 0x42, 0x97, 0xa1, 0x24, 0x88, 0xa8, 0xd4, 0x07, + 0x6a, 0x97, 0xac, 0x49, 0x00, 0x8e, 0x71, 0x58, 0xc6, 0x0b, 0x56, 0x5d, 0x33, 0xd6, 0x8d, 0x33, + 0x5e, 0x28, 0x08, 0xd6, 0xb0, 0xe8, 0xc3, 0x6a, 0xd3, 0xf7, 0x23, 0x75, 0x21, 0xa9, 0x75, 0xb7, + 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x8b, 0x68, 0x97, 0x04, 0x1e, 0x69, 0x9a, 0x41, 0x92, 0xd5, 0x45, + 0x74, 0x5d, 0x07, 0x62, 0x13, 0x97, 0x5e, 0xa7, 0x7e, 0xc8, 0x16, 0xb2, 0x78, 0xbe, 0xc5, 0xc6, + 0xcf, 0x35, 0xee, 0x2d, 0x2e, 0xe1, 0xe8, 0x93, 0xf0, 0x88, 0x0a, 0x04, 0x85, 0xb9, 0x36, 0x43, + 0xb6, 0x38, 0x64, 0x48, 0x5b, 0x1e, 0x59, 0xce, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xab, 0x30, 0x21, + 0x58, 0x7c, 0x49, 0x71, 0xd8, 0x34, 0xb0, 0xb9, 0x6e, 0x40, 0x71, 0x02, 0x5b, 0x86, 0x79, 0x66, + 0x5c, 0xb6, 0xa4, 0x30, 0x92, 0x0e, 0xf3, 0xac, 0xc3, 0x71, 0xaa, 0x06, 0x5a, 0x84, 0x49, 0xce, + 0x83, 0xb9, 0xde, 0x36, 0x9f, 0x13, 0xe1, 0x5a, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, 0x93, 0xf8, + 0xe8, 0x25, 0x18, 0x73, 0x82, 0xfa, 0x8e, 0x1b, 0x91, 0x7a, 0xd4, 0x09, 0xb8, 0xcf, 0x95, 0x66, + 0xa1, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0d, 0x33, 0x19, 0x61, 0x24, 0xe8, 0xc2, 0x71, + 0xda, 0xae, 0xfc, 0xa6, 0x84, 0x19, 0xf3, 0x62, 0xb5, 0x22, 0xbf, 0x46, 0xc3, 0xa2, 0xab, 0x93, + 0x85, 0x9b, 0xd0, 0xb2, 0x26, 0xaa, 0xd5, 0xb9, 0x2a, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x57, 0x01, + 0x26, 0x33, 0x74, 0x2b, 0x2c, 0x73, 0x5f, 0xe2, 0x91, 0x12, 0x27, 0xea, 0x33, 0xa3, 0x86, 0x17, + 0x8e, 0x10, 0x35, 0xbc, 0xd8, 0x2b, 0x6a, 0xf8, 0xc0, 0x3b, 0x89, 0x1a, 0x6e, 0x8e, 0xd8, 0x60, + 0x5f, 0x23, 0x96, 0x11, 0x69, 0x7c, 0xe8, 0x88, 0x91, 0xc6, 0x8d, 0x41, 0x1f, 0xee, 0x63, 0xd0, + 0x7f, 0xb8, 0x00, 0x53, 0x49, 0x4b, 0xca, 0x13, 0x90, 0xdb, 0xbe, 0x66, 0xc8, 0x6d, 0x2f, 0xf5, + 0xe3, 0x38, 0x9b, 0x2b, 0xc3, 0xc5, 0x09, 0x19, 0xee, 0x07, 0xfb, 0xa2, 0xd6, 0x5d, 0x9e, 0xfb, + 0x77, 0x0a, 0x70, 0x3a, 
0xd3, 0x73, 0xf7, 0x04, 0xc6, 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0xf6, 0xed, + 0x54, 0x9c, 0x3b, 0x40, 0x77, 0x12, 0x03, 0x74, 0xb9, 0x7f, 0x92, 0xdd, 0x47, 0xe9, 0xab, 0x45, + 0x38, 0x9f, 0x59, 0x2f, 0x16, 0x7b, 0xae, 0x1a, 0x62, 0xcf, 0xe7, 0x12, 0x62, 0x4f, 0xbb, 0x7b, + 0xed, 0xe3, 0x91, 0x83, 0x0a, 0x77, 0x59, 0x16, 0x13, 0xe1, 0x01, 0x65, 0xa0, 0x86, 0xbb, 0xac, + 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0xb2, 0xcf, 0x7f, 0x67, 0xc1, 0xd9, 0xcc, 0xb9, 0x39, 0x01, + 0x59, 0xd7, 0xba, 0x29, 0xeb, 0x7a, 0xaa, 0xef, 0xd5, 0x9a, 0x23, 0xfc, 0xfa, 0xb5, 0x81, 0x9c, + 0x6f, 0x61, 0x2f, 0xf9, 0x9b, 0x30, 0xea, 0xd4, 0xeb, 0x24, 0x0c, 0xd7, 0xfc, 0x86, 0x0a, 0x8c, + 0xfc, 0x2c, 0x7b, 0x67, 0xc5, 0xc5, 0x87, 0x07, 0xf3, 0x73, 0x49, 0x12, 0x31, 0x18, 0xeb, 0x14, + 0xd0, 0xa7, 0x61, 0x24, 0x14, 0xf7, 0xa6, 0x98, 0xfb, 0xe7, 0xfb, 0x1c, 0x1c, 0x67, 0x93, 0x34, + 0xcd, 0xc8, 0x4d, 0x4a, 0x52, 0xa1, 0x48, 0x9a, 0x51, 0x5e, 0x0a, 0xc7, 0x1a, 0xe5, 0xe5, 0x39, + 0x80, 0x3d, 0xf5, 0x18, 0x48, 0xca, 0x1f, 0xb4, 0x67, 0x82, 0x86, 0x85, 0x3e, 0x0e, 0x53, 0x21, + 0x0f, 0x6d, 0xb8, 0xdc, 0x74, 0x42, 0xe6, 0x2c, 0x23, 0x56, 0x21, 0x8b, 0x0e, 0x55, 0x4b, 0xc0, + 0x70, 0x0a, 0x1b, 0xad, 0xca, 0x56, 0x59, 0x1c, 0x46, 0xbe, 0x30, 0x2f, 0xc6, 0x2d, 0x8a, 0xbc, + 0xc1, 0xa7, 0x92, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0x3e, 0x0d, 0x40, 0x97, 0x8f, 0x90, 0x43, + 0x0c, 0xe7, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0x64, 0xda, 0xf6, 0x32, 0x0f, 0xd7, 0xb2, 0x22, 0x82, + 0x35, 0x82, 0xf6, 0x0f, 0x0f, 0xc0, 0xa3, 0x5d, 0xce, 0x48, 0xb4, 0x68, 0xea, 0x61, 0x9f, 0x4e, + 0x3e, 0xae, 0xe7, 0x32, 0x2b, 0x1b, 0xaf, 0xed, 0xc4, 0x52, 0x2c, 0xbc, 0xe3, 0xa5, 0xf8, 0x03, + 0x96, 0x26, 0xf6, 0xe0, 0x16, 0x9f, 0x1f, 0x3b, 0xe2, 0xd9, 0x7f, 0x8c, 0x72, 0x90, 0xad, 0x0c, + 0x61, 0xc2, 0x73, 0x7d, 0x77, 0xa7, 0x6f, 0xe9, 0xc2, 0xc9, 0x4a, 0x89, 0x7f, 0xcb, 0x82, 0x73, + 0x5d, 0x43, 0x7c, 0x7c, 0x03, 0x32, 0x0c, 0xf6, 0xe7, 0x2d, 0x78, 0x3c, 0xb3, 0x86, 0x61, 0x66, + 0x74, 0x19, 0x4a, 0x75, 0x5a, 0xa8, 0x79, 0x69, 0xc6, 0xee, 0xeb, 0x12, 0x80, 0x63, 0x9c, 0x23, + 0x86, 0x2f, 0xf9, 0x15, 0x0b, 0x52, 0x9b, 0xfe, 0x04, 0x6e, 0x9f, 0x8a, 0x79, 0xfb, 0xbc, 0xbf, + 0x9f, 0xd1, 0xcc, 0xb9, 0x78, 0xfe, 0x78, 0x12, 0xce, 0xe4, 0x78, 0x29, 0xed, 0xc1, 0xf4, 0x76, + 0x9d, 0x98, 0xfe, 0xaf, 0xdd, 0xa2, 0xc8, 0x74, 0x75, 0x96, 0x65, 0x99, 0x4d, 0xa7, 0x53, 0x28, + 0x38, 0xdd, 0x04, 0xfa, 0xbc, 0x05, 0xa7, 0x9c, 0xbb, 0xe1, 0x0a, 0xe5, 0x22, 0xdc, 0xfa, 0x52, + 0xd3, 0xaf, 0xef, 0xd2, 0x23, 0x5a, 0x6e, 0x84, 0x17, 0x32, 0x25, 0x3b, 0x77, 0x6a, 0x29, 0x7c, + 0xa3, 0x79, 0x96, 0xea, 0x35, 0x0b, 0x0b, 0x67, 0xb6, 0x85, 0xb0, 0x88, 0xff, 0x4f, 0xdf, 0x28, + 0x5d, 0x3c, 0xb4, 0xb3, 0xdc, 0xc9, 0xf8, 0xb5, 0x28, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x85, 0xd2, + 0xb6, 0xf4, 0xf1, 0xcc, 0xb8, 0x76, 0xe3, 0x81, 0xec, 0xee, 0xf9, 0xca, 0xd5, 0xb3, 0x0a, 0x09, + 0xc7, 0x44, 0xd1, 0xab, 0x50, 0xf4, 0xb6, 0xc2, 0x6e, 0xd9, 0x52, 0x13, 0x76, 0x78, 0x3c, 0x0e, + 0xc2, 0xfa, 0x6a, 0x0d, 0xd3, 0x8a, 0xe8, 0x1a, 0x14, 0x83, 0xcd, 0x86, 0x10, 0x4b, 0x66, 0x6e, + 0x52, 0xbc, 0x54, 0xce, 0xe9, 0x15, 0xa3, 0x84, 0x97, 0xca, 0x98, 0x92, 0x40, 0x55, 0x18, 0x64, + 0xae, 0x3d, 0xe2, 0x92, 0xcb, 0x64, 0xe7, 0xbb, 0xb8, 0xc8, 0xf1, 0x60, 0x09, 0x0c, 0x01, 0x73, + 0x42, 0x68, 0x03, 0x86, 0xea, 0x2c, 0xb3, 0xa6, 0x88, 0x1b, 0xf7, 0xa1, 0x4c, 0x01, 0x64, 0x97, + 0x94, 0xa3, 0x42, 0x1e, 0xc7, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xf6, 0xce, 0x56, 0x28, 0x32, + 0x41, 0x67, 0x53, 0xed, 0x92, 0x49, 0x57, 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0x2f, 0x43, 0x61, + 0xab, 0x2e, 0xdc, 0x76, 0x32, 0x25, 0x91, 0x66, 
0x28, 0x8b, 0xa5, 0xa1, 0xfb, 0x07, 0xf3, 0x85, + 0xd5, 0x65, 0x5c, 0xd8, 0xaa, 0xa3, 0x75, 0x18, 0xde, 0xe2, 0xce, 0xef, 0x42, 0xd8, 0xf8, 0x64, + 0xb6, 0x5f, 0x7e, 0xca, 0x3f, 0x9e, 0x7b, 0xac, 0x08, 0x00, 0x96, 0x44, 0x58, 0x38, 0x7d, 0xe5, + 0xc4, 0x2f, 0x42, 0xac, 0x2d, 0x1c, 0x2d, 0xf0, 0x02, 0x67, 0x3a, 0xe2, 0x50, 0x00, 0x58, 0xa3, + 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xf1, 0x8b, 0x60, 0x33, 0x99, 0xab, 0x5a, 0xe5, 0xec, 0xef, 0xb6, + 0xaa, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0x5d, 0x18, 0xdf, 0x0b, 0xdb, 0x3b, 0x44, 0x6e, 0x69, 0x16, + 0x7b, 0x26, 0xe7, 0x5e, 0xbe, 0x2d, 0x10, 0xdd, 0x20, 0xea, 0x38, 0xcd, 0xd4, 0x29, 0xc4, 0x74, + 0xfa, 0xb7, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xad, 0x8e, 0xbf, 0xb9, 0x1f, 0x11, 0x11, + 0x19, 0x2d, 0x73, 0xf8, 0x5f, 0xe7, 0x28, 0xe9, 0xe1, 0x17, 0x00, 0x2c, 0x89, 0xa0, 0xdb, 0x62, + 0x78, 0xd8, 0xe9, 0x39, 0x95, 0x1f, 0xbe, 0x74, 0x51, 0x22, 0xe5, 0x0c, 0x0a, 0x3b, 0x2d, 0x63, + 0x52, 0xec, 0x94, 0x6c, 0xef, 0xf8, 0x91, 0xef, 0x25, 0x4e, 0xe8, 0xe9, 0xfc, 0x53, 0xb2, 0x9a, + 0x81, 0x9f, 0x3e, 0x25, 0xb3, 0xb0, 0x70, 0x66, 0x5b, 0xa8, 0x01, 0x13, 0x6d, 0x3f, 0x88, 0xee, + 0xfa, 0x81, 0x5c, 0x5f, 0xa8, 0x8b, 0xb0, 0xc4, 0xc0, 0x14, 0x2d, 0xb2, 0xa0, 0x83, 0x26, 0x04, + 0x27, 0x68, 0xa2, 0x4f, 0xc0, 0x70, 0x58, 0x77, 0x9a, 0xa4, 0x72, 0x73, 0x76, 0x26, 0xff, 0xfa, + 0xa9, 0x71, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xef, 0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x64, + 0xe9, 0xd2, 0x58, 0x18, 0xbf, 0x9c, 0x28, 0xac, 0x29, 0xab, 0x68, 0x7e, 0x36, 0xb1, 0x62, 0xcc, + 0xab, 0xd3, 0x3d, 0x20, 0xde, 0x0c, 0x7e, 0x38, 0x7b, 0x3a, 0x7f, 0x0f, 0x88, 0xa7, 0xc6, 0xcd, + 0x5a, 0xb7, 0x3d, 0xa0, 0x90, 0x70, 0x4c, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x3d, 0xd3, 0xc5, 0x9c, + 0x27, 0xf7, 0x2c, 0x65, 0x27, 0x33, 0x3d, 0x49, 0x29, 0x09, 0xfb, 0xf7, 0x86, 0xd3, 0x3c, 0x0b, + 0x7b, 0x65, 0x7e, 0x97, 0x95, 0x52, 0x40, 0x7e, 0xb8, 0x5f, 0xa1, 0xd7, 0x31, 0xb2, 0xe0, 0x9f, + 0xb7, 0xe0, 0x4c, 0x3b, 0xf3, 0x43, 0x04, 0x03, 0xd0, 0x9f, 0xec, 0x8c, 0x7f, 0xba, 0x0a, 0xf9, + 0x98, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x9f, 0x39, 0xc5, 0x77, 0xfc, 0xcc, 0x59, 0x83, 0x11, 0xc6, + 0x64, 0xf6, 0xc8, 0x34, 0x9d, 0x7c, 0xed, 0x31, 0x56, 0x62, 0x59, 0x54, 0xc4, 0x8a, 0x04, 0xfa, + 0x41, 0x0b, 0xce, 0x25, 0xbb, 0x8e, 0x09, 0x03, 0x8b, 0x38, 0x91, 0xfc, 0x81, 0xbb, 0x2a, 0xbe, + 0x3f, 0xc5, 0xff, 0x1b, 0xc8, 0x87, 0xbd, 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x39, 0xe3, 0x85, 0x3d, + 0x64, 0x6a, 0x15, 0xfa, 0x78, 0x65, 0xbf, 0x00, 0x63, 0x2d, 0xbf, 0xe3, 0x45, 0xc2, 0xfa, 0x47, + 0x58, 0x22, 0x30, 0x0d, 0xfc, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0xc4, 0xdb, 0x7c, 0xe4, 0x81, 0xdf, + 0xe6, 0x6f, 0xc0, 0x98, 0xa7, 0x99, 0xab, 0x0a, 0x7e, 0xe0, 0x62, 0x7e, 0x8c, 0x57, 0xdd, 0xb8, + 0x95, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0x93, 0x7d, 0xf0, 0x7d, 0xc9, 0xca, 0x60, 0xea, 0xb9, + 0x08, 0xe0, 0xa3, 0xa6, 0x08, 0xe0, 0x62, 0x52, 0x04, 0x90, 0x92, 0x28, 0x1b, 0xaf, 0xff, 0xfe, + 0x53, 0xd8, 0xf4, 0x1b, 0x08, 0xd1, 0x6e, 0xc2, 0x85, 0x5e, 0xd7, 0x12, 0x33, 0x03, 0x6b, 0x28, + 0xfd, 0x61, 0x6c, 0x06, 0xd6, 0xa8, 0x94, 0x31, 0x83, 0xf4, 0x1b, 0x62, 0xc7, 0xfe, 0x1f, 0x16, + 0x14, 0xab, 0x7e, 0xe3, 0x04, 0x1e, 0xbc, 0x1f, 0x33, 0x1e, 0xbc, 0x8f, 0x66, 0x5f, 0x88, 0x8d, + 0x5c, 0x79, 0xf8, 0x4a, 0x42, 0x1e, 0x7e, 0x2e, 0x8f, 0x40, 0x77, 0xe9, 0xf7, 0x4f, 0x16, 0x61, + 0xb4, 0xea, 0x37, 0x94, 0x0d, 0xf6, 0xaf, 0x3d, 0x88, 0x0d, 0x76, 0x6e, 0x22, 0x06, 0x8d, 0x32, + 0xb3, 0x1e, 0x93, 0xee, 0xa7, 0xdf, 0x60, 0xa6, 0xd8, 0x77, 0x88, 0xbb, 0xbd, 0x13, 0x91, 0x46, + 0xf2, 0x73, 0x4e, 0xce, 0x14, 0xfb, 0xbf, 0x5b, 0x30, 0x99, 0x68, 0x1d, 
0x35, 0x61, 0xbc, 0xa9, + 0x4b, 0x5b, 0xc5, 0x3a, 0x7d, 0x20, 0x41, 0xad, 0x30, 0x65, 0xd5, 0x8a, 0xb0, 0x49, 0x1c, 0x2d, + 0x00, 0x28, 0xf5, 0xa3, 0x14, 0xeb, 0x31, 0xae, 0x5f, 0xe9, 0x27, 0x43, 0xac, 0x61, 0xa0, 0x17, + 0x61, 0x34, 0xf2, 0xdb, 0x7e, 0xd3, 0xdf, 0xde, 0xbf, 0x4e, 0x64, 0x50, 0x27, 0x65, 0xa0, 0xb6, + 0x11, 0x83, 0xb0, 0x8e, 0x67, 0xff, 0x74, 0x91, 0x7f, 0xa8, 0x17, 0xb9, 0xef, 0xad, 0xc9, 0x77, + 0xf7, 0x9a, 0xfc, 0xaa, 0x05, 0x53, 0xb4, 0x75, 0x66, 0x03, 0x23, 0x2f, 0x5b, 0x15, 0xdb, 0xd9, + 0xea, 0x12, 0xdb, 0xf9, 0x22, 0x3d, 0xbb, 0x1a, 0x7e, 0x27, 0x12, 0x12, 0x34, 0xed, 0x70, 0xa2, + 0xa5, 0x58, 0x40, 0x05, 0x1e, 0x09, 0x02, 0xe1, 0xb7, 0xa7, 0xe3, 0x91, 0x20, 0xc0, 0x02, 0x2a, + 0x43, 0x3f, 0x0f, 0x64, 0x87, 0x7e, 0xe6, 0x21, 0x2a, 0x85, 0xb5, 0x84, 0x60, 0x7b, 0xb4, 0x10, + 0x95, 0xd2, 0x8c, 0x22, 0xc6, 0xb1, 0x7f, 0xb6, 0x08, 0x63, 0x55, 0xbf, 0x11, 0x2b, 0x00, 0x5f, + 0x30, 0x14, 0x80, 0x17, 0x12, 0x0a, 0xc0, 0x29, 0x1d, 0xf7, 0x3d, 0x75, 0xdf, 0xd7, 0x4b, 0xdd, + 0xf7, 0xcf, 0x2d, 0x36, 0x6b, 0xe5, 0xf5, 0x1a, 0x37, 0xa9, 0x42, 0x57, 0x60, 0x94, 0x1d, 0x48, + 0xcc, 0x51, 0x54, 0x6a, 0xc5, 0x58, 0x4a, 0xa3, 0xf5, 0xb8, 0x18, 0xeb, 0x38, 0xe8, 0x12, 0x8c, + 0x84, 0xc4, 0x09, 0xea, 0x3b, 0xea, 0x8c, 0x13, 0x2a, 0x2c, 0x5e, 0x86, 0x15, 0x14, 0xbd, 0x1e, + 0x47, 0x47, 0x2c, 0xe6, 0x3b, 0x9e, 0xe9, 0xfd, 0xe1, 0x5b, 0x24, 0x3f, 0x24, 0xa2, 0x7d, 0x07, + 0x50, 0x1a, 0xbf, 0x8f, 0xb0, 0x60, 0xf3, 0x66, 0x58, 0xb0, 0x52, 0x2a, 0x24, 0xd8, 0x9f, 0x5b, + 0x30, 0x51, 0xf5, 0x1b, 0x74, 0xeb, 0x7e, 0x33, 0xed, 0x53, 0x3d, 0x34, 0xec, 0x50, 0x97, 0xd0, + 0xb0, 0x4f, 0xc0, 0x60, 0xd5, 0x6f, 0x54, 0xaa, 0xdd, 0xbc, 0xbe, 0xed, 0xbf, 0x6b, 0xc1, 0x70, + 0xd5, 0x6f, 0x9c, 0x80, 0x70, 0xfe, 0xa3, 0xa6, 0x70, 0xfe, 0x91, 0x9c, 0x75, 0x93, 0x23, 0x8f, + 0xff, 0xdb, 0x03, 0x30, 0x4e, 0xfb, 0xe9, 0x6f, 0xcb, 0xa9, 0x34, 0x86, 0xcd, 0xea, 0x63, 0xd8, + 0x28, 0x2f, 0xec, 0x37, 0x9b, 0xfe, 0xdd, 0xe4, 0xb4, 0xae, 0xb2, 0x52, 0x2c, 0xa0, 0xe8, 0x19, + 0x18, 0x69, 0x07, 0x64, 0xcf, 0xf5, 0x05, 0x93, 0xa9, 0xa9, 0x3a, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, + 0xfa, 0x38, 0x0b, 0x5d, 0xaf, 0x4e, 0x6a, 0xa4, 0xee, 0x7b, 0x0d, 0x2e, 0xbf, 0x2e, 0x8a, 0xf4, + 0x0e, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x1d, 0x28, 0xb1, 0xff, 0xec, 0xd8, 0x39, 0x7a, 0xa2, 0x50, + 0x91, 0x38, 0x4e, 0x10, 0xc0, 0x31, 0x2d, 0xf4, 0x1c, 0x40, 0x24, 0x63, 0x80, 0x87, 0x22, 0x04, + 0x94, 0x62, 0xc8, 0x55, 0x74, 0xf0, 0x10, 0x6b, 0x58, 0xe8, 0x69, 0x28, 0x45, 0x8e, 0xdb, 0xbc, + 0xe1, 0x7a, 0x24, 0x64, 0x72, 0xe9, 0xa2, 0xcc, 0xdf, 0x26, 0x0a, 0x71, 0x0c, 0xa7, 0x0c, 0x11, + 0x8b, 0x8f, 0xc0, 0xd3, 0x0c, 0x8f, 0x30, 0x6c, 0xc6, 0x10, 0xdd, 0x50, 0xa5, 0x58, 0xc3, 0x40, + 0x3b, 0xf0, 0x98, 0xeb, 0xb1, 0x54, 0x08, 0xa4, 0xb6, 0xeb, 0xb6, 0x37, 0x6e, 0xd4, 0x6e, 0x93, + 0xc0, 0xdd, 0xda, 0x5f, 0x72, 0xea, 0xbb, 0xc4, 0x93, 0x29, 0x20, 0xdf, 0x2f, 0xba, 0xf8, 0x58, + 0xa5, 0x0b, 0x2e, 0xee, 0x4a, 0xc9, 0x7e, 0x09, 0x4e, 0x57, 0xfd, 0x46, 0xd5, 0x0f, 0xa2, 0x55, + 0x3f, 0xb8, 0xeb, 0x04, 0x0d, 0xb9, 0x52, 0xe6, 0x65, 0xac, 0x02, 0x7a, 0x14, 0x0e, 0xf2, 0x83, + 0xc2, 0x88, 0x43, 0xf0, 0x3c, 0x63, 0xbe, 0x8e, 0xe8, 0x61, 0x53, 0x67, 0x6c, 0x80, 0xca, 0x0b, + 0x72, 0xd5, 0x89, 0x08, 0xba, 0xc9, 0xf2, 0x1d, 0xc7, 0x37, 0xa2, 0xa8, 0xfe, 0x94, 0x96, 0xef, + 0x38, 0x06, 0x66, 0x5e, 0xa1, 0x66, 0x7d, 0xfb, 0x7f, 0x0e, 0xb2, 0xc3, 0x31, 0x91, 0x5b, 0x02, + 0x7d, 0x06, 0x26, 0x42, 0x72, 0xc3, 0xf5, 0x3a, 0xf7, 0xa4, 0x4c, 0xa0, 0x8b, 0x8f, 0x54, 0x6d, + 0x45, 0xc7, 0xe4, 0x92, 0x45, 0xb3, 0x0c, 0x27, 0xa8, 0xa1, 0x16, 0x4c, 0xdc, 0x75, 0xbd, 0x86, + 
0x7f, 0x37, 0x94, 0xf4, 0x47, 0xf2, 0x05, 0x8c, 0x77, 0x38, 0x66, 0xa2, 0x8f, 0x46, 0x73, 0x77, + 0x0c, 0x62, 0x38, 0x41, 0x9c, 0x2e, 0xc0, 0xa0, 0xe3, 0x2d, 0x86, 0xb7, 0x42, 0x12, 0x88, 0xcc, + 0xd5, 0x6c, 0x01, 0x62, 0x59, 0x88, 0x63, 0x38, 0x5d, 0x80, 0xec, 0xcf, 0xd5, 0xc0, 0xef, 0xf0, + 0x48, 0xfd, 0x62, 0x01, 0x62, 0x55, 0x8a, 0x35, 0x0c, 0xba, 0x41, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, + 0xbe, 0x1f, 0xc9, 0x2d, 0xcd, 0x72, 0xa5, 0x6a, 0xe5, 0xd8, 0xc0, 0x42, 0xab, 0x80, 0xc2, 0x4e, + 0xbb, 0xdd, 0x64, 0xc6, 0x17, 0x4e, 0x93, 0x91, 0xe2, 0x8a, 0xef, 0x22, 0x0f, 0x60, 0x5a, 0x4b, + 0x41, 0x71, 0x46, 0x0d, 0x7a, 0x56, 0x6f, 0x89, 0xae, 0x0e, 0xb2, 0xae, 0x72, 0x65, 0x44, 0x8d, + 0xf7, 0x53, 0xc2, 0xd0, 0x0a, 0x0c, 0x87, 0xfb, 0x61, 0x3d, 0x12, 0x91, 0xd8, 0x72, 0xd2, 0x07, + 0xd5, 0x18, 0x8a, 0x96, 0xbd, 0x8e, 0x57, 0xc1, 0xb2, 0x2e, 0xaa, 0xc3, 0x8c, 0xa0, 0xb8, 0xbc, + 0xe3, 0x78, 0x2a, 0x19, 0x0b, 0xb7, 0x41, 0xbd, 0x72, 0xff, 0x60, 0x7e, 0x46, 0xb4, 0xac, 0x83, + 0x0f, 0x0f, 0xe6, 0xcf, 0x54, 0xfd, 0x46, 0x06, 0x04, 0x67, 0x51, 0xe3, 0x8b, 0xaf, 0x5e, 0xf7, + 0x5b, 0xed, 0x6a, 0xe0, 0x6f, 0xb9, 0x4d, 0xd2, 0x4d, 0xa1, 0x53, 0x33, 0x30, 0xc5, 0xe2, 0x33, + 0xca, 0x70, 0x82, 0x9a, 0xfd, 0xed, 0x8c, 0x9f, 0x61, 0xc9, 0x9a, 0xa3, 0x4e, 0x40, 0x50, 0x0b, + 0xc6, 0xdb, 0x6c, 0x9b, 0x88, 0xf8, 0xf9, 0x62, 0xad, 0xbf, 0xd0, 0xa7, 0x60, 0xe2, 0x2e, 0xbd, + 0x06, 0x94, 0xe0, 0x90, 0xbd, 0xf8, 0xaa, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0x7f, 0xec, 0x11, 0x76, + 0x23, 0xd6, 0xb8, 0xb4, 0x61, 0x58, 0x98, 0xbc, 0x8b, 0xa7, 0xd5, 0x5c, 0xbe, 0xd8, 0x2b, 0x9e, + 0x16, 0x61, 0x36, 0x8f, 0x65, 0x5d, 0xf4, 0x69, 0x98, 0xa0, 0x2f, 0x15, 0x2d, 0x0b, 0xca, 0xa9, + 0xfc, 0xd0, 0x04, 0x71, 0xf2, 0x13, 0x2d, 0xb7, 0x86, 0x5e, 0x19, 0x27, 0x88, 0xa1, 0xd7, 0x99, + 0x71, 0x86, 0x99, 0x60, 0xa5, 0x07, 0x69, 0xdd, 0x0e, 0x43, 0x92, 0xd5, 0x88, 0xe4, 0x25, 0x6f, + 0xb1, 0x1f, 0x6e, 0xf2, 0x16, 0x74, 0x03, 0xc6, 0x45, 0xc6, 0x62, 0xb1, 0x72, 0x8b, 0x86, 0x34, + 0x6e, 0x1c, 0xeb, 0xc0, 0xc3, 0x64, 0x01, 0x36, 0x2b, 0xa3, 0x6d, 0x38, 0xa7, 0x65, 0x10, 0xba, + 0x1a, 0x38, 0x4c, 0xa5, 0xee, 0xb2, 0xe3, 0x54, 0xbb, 0xab, 0x1f, 0xbf, 0x7f, 0x30, 0x7f, 0x6e, + 0xa3, 0x1b, 0x22, 0xee, 0x4e, 0x07, 0xdd, 0x84, 0xd3, 0xdc, 0xb1, 0xb6, 0x4c, 0x9c, 0x46, 0xd3, + 0xf5, 0x14, 0x33, 0xc0, 0xb7, 0xfc, 0xd9, 0xfb, 0x07, 0xf3, 0xa7, 0x17, 0xb3, 0x10, 0x70, 0x76, + 0x3d, 0xf4, 0x51, 0x28, 0x35, 0xbc, 0x50, 0x8c, 0xc1, 0x90, 0x91, 0xa4, 0xa9, 0x54, 0x5e, 0xaf, + 0xa9, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x02, 0xda, 0xe6, 0x12, 0x5b, 0x25, 0x20, 0x19, 0x4e, 0x05, + 0x16, 0x4a, 0x8a, 0xda, 0x0c, 0xd7, 0x3a, 0xae, 0xaa, 0x50, 0x16, 0xe7, 0x86, 0xd7, 0x9d, 0x41, + 0x18, 0xbd, 0x06, 0x88, 0xbe, 0x20, 0xdc, 0x3a, 0x59, 0xac, 0xb3, 0xe4, 0x0c, 0x4c, 0xc0, 0x3d, + 0x62, 0x3a, 0x7b, 0xd5, 0x52, 0x18, 0x38, 0xa3, 0x16, 0xba, 0x46, 0x4f, 0x15, 0xbd, 0x54, 0x9c, + 0x5a, 0x2a, 0xa5, 0x5e, 0x99, 0xb4, 0x03, 0x52, 0x77, 0x22, 0xd2, 0x30, 0x29, 0xe2, 0x44, 0x3d, + 0xd4, 0x80, 0xc7, 0x9c, 0x4e, 0xe4, 0x33, 0x61, 0xb8, 0x89, 0xba, 0xe1, 0xef, 0x12, 0x8f, 0xe9, + 0xa1, 0x46, 0x96, 0x2e, 0x50, 0x6e, 0x63, 0xb1, 0x0b, 0x1e, 0xee, 0x4a, 0x85, 0x72, 0x89, 0x2a, + 0x87, 0x2e, 0x98, 0xe1, 0x92, 0x32, 0xf2, 0xe8, 0xbe, 0x08, 0xa3, 0x3b, 0x7e, 0x18, 0xad, 0x93, + 0xe8, 0xae, 0x1f, 0xec, 0x8a, 0xa8, 0x97, 0x71, 0xa4, 0xe4, 0x18, 0x84, 0x75, 0x3c, 0xfa, 0x0c, + 0x64, 0x56, 0x12, 0x95, 0x32, 0x53, 0x50, 0x8f, 0xc4, 0x67, 0xcc, 0x35, 0x5e, 0x8c, 0x25, 0x5c, + 0xa2, 0x56, 0xaa, 0xcb, 0x4c, 0xd9, 0x9c, 0x40, 0xad, 0x54, 0x97, 0xb1, 0x84, 0xd3, 0xe5, 0x1a, + 0xee, 0x38, 0x01, 0xa9, 
0x06, 0x7e, 0x9d, 0x84, 0x5a, 0x7c, 0xee, 0x47, 0x79, 0x4c, 0x4f, 0xba, + 0x5c, 0x6b, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x22, 0xe9, 0xec, 0x59, 0x13, 0xf9, 0x5a, 0x82, 0x34, + 0x3f, 0xd3, 0x67, 0x02, 0x2d, 0x0f, 0xa6, 0x54, 0xde, 0x2e, 0x1e, 0xc5, 0x33, 0x9c, 0x9d, 0x64, + 0x6b, 0xbb, 0xff, 0x10, 0xa0, 0x4a, 0xef, 0x52, 0x49, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xac, + 0xa9, 0x9e, 0x11, 0xb1, 0x2e, 0x43, 0x29, 0xec, 0x6c, 0x36, 0xfc, 0x96, 0xe3, 0x7a, 0x4c, 0xd9, + 0xac, 0xbd, 0x47, 0x6a, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x0a, 0x23, 0x8e, 0x54, 0xaa, 0xa0, 0xfc, + 0x18, 0x28, 0x4a, 0x95, 0xc2, 0xc3, 0x02, 0x48, 0x35, 0x8a, 0xaa, 0x8b, 0x5e, 0x81, 0x71, 0xe1, + 0x18, 0x2a, 0x52, 0x46, 0xce, 0x98, 0xde, 0x3b, 0x35, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0b, 0x46, + 0x23, 0xbf, 0xc9, 0x5c, 0x50, 0x28, 0x9b, 0x77, 0x26, 0x3f, 0x9a, 0xd7, 0x86, 0x42, 0xd3, 0xe5, + 0x99, 0xaa, 0x2a, 0xd6, 0xe9, 0xa0, 0x0d, 0xbe, 0xde, 0x59, 0x9c, 0x6a, 0x12, 0xce, 0x3e, 0x92, + 0x7f, 0x27, 0xa9, 0x70, 0xd6, 0xe6, 0x76, 0x10, 0x35, 0xb1, 0x4e, 0x06, 0x5d, 0x85, 0xe9, 0x76, + 0xe0, 0xfa, 0x6c, 0x4d, 0x28, 0x7d, 0xda, 0xac, 0x99, 0x24, 0xa7, 0x9a, 0x44, 0xc0, 0xe9, 0x3a, + 0xcc, 0xaf, 0x57, 0x14, 0xce, 0x9e, 0xe5, 0x59, 0xa5, 0xf9, 0xf3, 0x8e, 0x97, 0x61, 0x05, 0x45, + 0x6b, 0xec, 0x24, 0xe6, 0x92, 0x89, 0xd9, 0xb9, 0xfc, 0xb0, 0x2b, 0xba, 0x04, 0x83, 0x33, 0xaf, + 0xea, 0x2f, 0x8e, 0x29, 0xa0, 0x86, 0x96, 0x7e, 0x90, 0xbe, 0x18, 0xc2, 0xd9, 0xc7, 0xba, 0x98, + 0xaa, 0x25, 0x9e, 0x17, 0x31, 0x43, 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0x7d, 0x1c, 0xa6, 0x44, + 0xb0, 0xb8, 0x78, 0x98, 0xce, 0xc5, 0x86, 0xbd, 0x38, 0x01, 0xc3, 0x29, 0x6c, 0x1e, 0xbf, 0xdf, + 0xd9, 0x6c, 0x12, 0x71, 0xf4, 0xdd, 0x70, 0xbd, 0xdd, 0x70, 0xf6, 0x3c, 0x3b, 0x1f, 0x44, 0xfc, + 0xfe, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b, 0x30, 0xd5, 0x0e, 0x08, 0x69, 0x31, 0x46, 0x5f, 0xdc, + 0x67, 0xf3, 0xdc, 0xad, 0x9d, 0xf6, 0xa4, 0x9a, 0x80, 0x1d, 0x66, 0x94, 0xe1, 0x14, 0x05, 0x74, + 0x17, 0x46, 0xfc, 0x3d, 0x12, 0xec, 0x10, 0xa7, 0x31, 0x7b, 0xa1, 0x8b, 0xa1, 0xb9, 0xb8, 0xdc, + 0x6e, 0x0a, 0xdc, 0x84, 0x0e, 0x5e, 0x16, 0xf7, 0xd6, 0xc1, 0xcb, 0xc6, 0xd0, 0x0f, 0x59, 0x70, + 0x56, 0x8a, 0xed, 0x6b, 0x6d, 0x3a, 0xea, 0xcb, 0xbe, 0x17, 0x46, 0x01, 0x77, 0xc4, 0x7e, 0x3c, + 0xdf, 0x39, 0x79, 0x23, 0xa7, 0x92, 0x12, 0x8e, 0x9e, 0xcd, 0xc3, 0x08, 0x71, 0x7e, 0x8b, 0x68, + 0x19, 0xa6, 0x43, 0x12, 0xc9, 0xc3, 0x68, 0x31, 0x5c, 0x7d, 0xbd, 0xbc, 0x3e, 0xfb, 0x04, 0xf7, + 0x22, 0xa7, 0x9b, 0xa1, 0x96, 0x04, 0xe2, 0x34, 0xfe, 0xdc, 0xb7, 0xc2, 0x74, 0xea, 0xfa, 0x3f, + 0x4a, 0x5e, 0x92, 0xb9, 0x5d, 0x18, 0x37, 0x86, 0xf8, 0xa1, 0xea, 0x70, 0xff, 0xcd, 0x30, 0x94, + 0x94, 0x7e, 0x0f, 0x5d, 0x36, 0xd5, 0xb6, 0x67, 0x93, 0x6a, 0xdb, 0x11, 0xfa, 0xae, 0xd7, 0x35, + 0xb5, 0x1b, 0x19, 0xb1, 0xb3, 0xf2, 0x36, 0x74, 0xff, 0x4e, 0xd1, 0x9a, 0xb8, 0xb6, 0xd8, 0xb7, + 0xfe, 0x77, 0xa0, 0xab, 0x04, 0xf8, 0x2a, 0x4c, 0x7b, 0x3e, 0xe3, 0x39, 0x49, 0x43, 0x32, 0x14, + 0x8c, 0x6f, 0x28, 0xe9, 0xc1, 0x28, 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x2f, 0xfe, 0xa4, + 0xc8, 0x99, 0xf3, 0x05, 0x58, 0x40, 0xd1, 0x13, 0x30, 0xd8, 0xf6, 0x1b, 0x95, 0xaa, 0xe0, 0x37, + 0xb5, 0x88, 0x8d, 0x8d, 0x4a, 0x15, 0x73, 0x18, 0x5a, 0x84, 0x21, 0xf6, 0x23, 0x9c, 0x1d, 0xcb, + 0x8f, 0x3a, 0xc0, 0x6a, 0x68, 0x59, 0x5f, 0x58, 0x05, 0x2c, 0x2a, 0x32, 0xd1, 0x17, 0x65, 0xd2, + 0x99, 0xe8, 0x6b, 0xf8, 0x01, 0x45, 0x5f, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x7b, 0x70, 0xda, 0x78, + 0x18, 0xf1, 0x25, 0x42, 0x42, 0xe1, 0xf9, 0xfc, 0x44, 0xd7, 0x17, 0x91, 0xd0, 0x17, 0x9f, 0x13, + 0x9d, 0x3e, 0x5d, 0xc9, 0xa2, 0x84, 0xb3, 0x1b, 
0x40, 0x4d, 0x98, 0xae, 0xa7, 0x5a, 0x1d, 0xe9, + 0xbf, 0x55, 0x35, 0xa1, 0xe9, 0x16, 0xd3, 0x84, 0xd1, 0x2b, 0x30, 0xf2, 0x96, 0x1f, 0xb2, 0xb3, + 0x5a, 0xf0, 0xc8, 0xd2, 0x6d, 0x76, 0xe4, 0xf5, 0x9b, 0x35, 0x56, 0x7e, 0x78, 0x30, 0x3f, 0x5a, + 0xf5, 0x1b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0xf7, 0x5a, 0x30, 0x97, 0x7e, 0x79, 0xa9, 0x4e, 0x8f, + 0xf7, 0xdf, 0x69, 0x5b, 0x34, 0x3a, 0xb7, 0x92, 0x4b, 0x0e, 0x77, 0x69, 0xca, 0xfe, 0x25, 0xae, + 0xdb, 0x15, 0x1a, 0x20, 0x12, 0x76, 0x9a, 0x27, 0x91, 0xec, 0x72, 0xc5, 0x50, 0x4e, 0x3d, 0xb0, + 0xfd, 0xc0, 0xbf, 0xb4, 0x98, 0xfd, 0xc0, 0x09, 0x3a, 0x0a, 0xbc, 0x0e, 0x23, 0x91, 0x4c, 0x59, + 0xda, 0x25, 0x3f, 0xa7, 0xd6, 0x29, 0x66, 0x43, 0xa1, 0x38, 0x56, 0x95, 0x9d, 0x54, 0x91, 0xb1, + 0xff, 0x09, 0x9f, 0x01, 0x09, 0x39, 0x01, 0x1d, 0x40, 0xd9, 0xd4, 0x01, 0xcc, 0xf7, 0xf8, 0x82, + 0x1c, 0x5d, 0xc0, 0x3f, 0x36, 0xfb, 0xcd, 0x24, 0x35, 0xef, 0x76, 0xc3, 0x15, 0xfb, 0x0b, 0x16, + 0x40, 0x1c, 0x10, 0x97, 0xc9, 0x97, 0xfd, 0x40, 0x66, 0x3a, 0xcc, 0xca, 0xe9, 0xf3, 0x12, 0xe5, + 0x51, 0xfd, 0xc8, 0xaf, 0xfb, 0x4d, 0xa1, 0xe1, 0x7a, 0x2c, 0x56, 0x43, 0xf0, 0xf2, 0x43, 0xed, + 0x37, 0x56, 0xd8, 0x68, 0x5e, 0x86, 0xdf, 0x2a, 0xc6, 0x8a, 0x31, 0x23, 0xf4, 0xd6, 0x8f, 0x58, + 0x70, 0x2a, 0xcb, 0xea, 0x94, 0xbe, 0x78, 0xb8, 0xcc, 0x4a, 0x19, 0x15, 0xa9, 0xd9, 0xbc, 0x2d, + 0xca, 0xb1, 0xc2, 0xe8, 0x3b, 0x7f, 0xd7, 0xd1, 0x22, 0xd1, 0xde, 0x84, 0xf1, 0x6a, 0x40, 0xb4, + 0xcb, 0xf5, 0x55, 0xee, 0xd2, 0xcd, 0xfb, 0xf3, 0xcc, 0x91, 0xdd, 0xb9, 0xed, 0x2f, 0x17, 0xe0, + 0x14, 0xb7, 0x0a, 0x58, 0xdc, 0xf3, 0xdd, 0x46, 0xd5, 0x6f, 0x88, 0xdc, 0x6b, 0x9f, 0x82, 0xb1, + 0xb6, 0x26, 0x68, 0xec, 0x16, 0x55, 0x51, 0x17, 0x48, 0xc6, 0xa2, 0x11, 0xbd, 0x14, 0x1b, 0xb4, + 0x50, 0x03, 0xc6, 0xc8, 0x9e, 0x5b, 0x57, 0xaa, 0xe5, 0xc2, 0x91, 0x2f, 0x3a, 0xd5, 0xca, 0x8a, + 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x42, 0x56, 0x5d, 0xfb, 0x47, 0x2d, 0x78, 0x24, 0x27, 0x06, 0x23, + 0x6d, 0xee, 0x2e, 0xb3, 0xbf, 0x10, 0xcb, 0x56, 0x35, 0xc7, 0xad, 0x32, 0xb0, 0x80, 0xa2, 0x4f, + 0x00, 0x70, 0xab, 0x0a, 0xfa, 0xe4, 0xee, 0x15, 0xac, 0xce, 0x88, 0xb3, 0xa5, 0x85, 0x4c, 0x92, + 0xf5, 0xb1, 0x46, 0xcb, 0xfe, 0xa9, 0x22, 0x0c, 0xf2, 0x04, 0xe9, 0xab, 0x30, 0xbc, 0xc3, 0x33, + 0x52, 0xf4, 0x93, 0xfc, 0x22, 0x16, 0x86, 0xf0, 0x02, 0x2c, 0x2b, 0xa3, 0x35, 0x98, 0xe1, 0x19, + 0x3d, 0x9a, 0x65, 0xd2, 0x74, 0xf6, 0xa5, 0xe4, 0x8e, 0x67, 0xc3, 0x54, 0x12, 0xcc, 0x4a, 0x1a, + 0x05, 0x67, 0xd5, 0x43, 0xaf, 0xc2, 0x04, 0x7d, 0x49, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb9, 0x3c, + 0xd4, 0xd3, 0x6d, 0xc3, 0x80, 0xe2, 0x04, 0x36, 0x7d, 0xcc, 0xb7, 0x53, 0x32, 0xca, 0xc1, 0xf8, + 0x31, 0x6f, 0xca, 0x25, 0x4d, 0x5c, 0x66, 0x6e, 0xda, 0x61, 0xc6, 0xb5, 0x1b, 0x3b, 0x01, 0x09, + 0x77, 0xfc, 0x66, 0x83, 0x31, 0x7d, 0x83, 0x9a, 0xb9, 0x69, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, + 0x2d, 0xc7, 0x6d, 0x76, 0x02, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x26, 0xe0, 0x38, 0x55, 0x83, + 0xae, 0xa3, 0xd3, 0xd5, 0xc0, 0xa7, 0x07, 0xa9, 0x0c, 0x2c, 0xa3, 0x6c, 0x88, 0x87, 0xa5, 0x0f, + 0x6c, 0x97, 0x10, 0x6c, 0xc2, 0xca, 0x92, 0x53, 0x30, 0x0c, 0x08, 0x6a, 0xc2, 0xfb, 0x55, 0x52, + 0x41, 0x57, 0x60, 0x54, 0xe4, 0x69, 0x60, 0xa6, 0xae, 0x7c, 0xea, 0x98, 0xc1, 0x43, 0x39, 0x2e, + 0xc6, 0x3a, 0x8e, 0xfd, 0x7d, 0x05, 0x98, 0xc9, 0xf0, 0x55, 0xe0, 0x47, 0xd5, 0xb6, 0x1b, 0x46, + 0x2a, 0xe3, 0x9f, 0x76, 0x54, 0xf1, 0x72, 0xac, 0x30, 0xe8, 0x7e, 0xe0, 0x87, 0x61, 0xf2, 0x00, + 0x14, 0xb6, 0xc0, 0x02, 0x7a, 0xc4, 0xdc, 0x79, 0x17, 0x60, 0xa0, 0x13, 0x12, 0x19, 0x3c, 0x51, + 0x5d, 0x0d, 0x4c, 0x0f, 0xc6, 0x20, 0x94, 0x55, 0xdf, 0x56, 0x2a, 0x25, 
0x8d, 0x55, 0xe7, 0x4a, + 0x25, 0x0e, 0xa3, 0x9d, 0x8b, 0x88, 0xe7, 0x78, 0x91, 0x60, 0xe8, 0xe3, 0x28, 0x60, 0xac, 0x14, + 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xcd, 0xf5, 0x5e, 0xa2, 0x5d, 0x6f, 0xf9, 0x9e, 0x1b, 0xf9, + 0xca, 0x92, 0x84, 0x47, 0xfe, 0x22, 0xed, 0x9d, 0x35, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x08, 0x83, + 0x4c, 0x8a, 0x96, 0xca, 0x7d, 0xb8, 0x54, 0xe6, 0xa1, 0x60, 0x38, 0xb8, 0xef, 0xbc, 0xb2, 0x4f, + 0xd0, 0x5b, 0xd2, 0x6f, 0x26, 0x0f, 0x2d, 0xda, 0x5d, 0xdf, 0x6f, 0x62, 0x06, 0x44, 0x1f, 0x10, + 0xe3, 0x95, 0x30, 0x9d, 0xc0, 0x4e, 0xc3, 0x0f, 0xb5, 0x41, 0x7b, 0x0a, 0x86, 0x77, 0xc9, 0x7e, + 0xe0, 0x7a, 0xdb, 0x49, 0x93, 0x9a, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x99, 0xc6, 0x6a, 0xf8, 0xb8, + 0x13, 0xc2, 0x8e, 0xf4, 0xbc, 0x02, 0x7f, 0xa0, 0x08, 0x93, 0x78, 0xa9, 0xfc, 0xde, 0x44, 0xdc, + 0x4a, 0x4f, 0xc4, 0x71, 0x27, 0x84, 0xed, 0x3d, 0x1b, 0x3f, 0x6f, 0xc1, 0x24, 0xcb, 0x16, 0x21, + 0x62, 0x46, 0xb9, 0xbe, 0x77, 0x02, 0xec, 0xe6, 0x13, 0x30, 0x18, 0xd0, 0x46, 0x93, 0x49, 0x0f, + 0x59, 0x4f, 0x30, 0x87, 0xa1, 0xc7, 0x60, 0x80, 0x75, 0x81, 0x4e, 0xde, 0x18, 0xcf, 0x17, 0x55, + 0x76, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x08, 0x05, 0x93, 0x76, 0xd3, 0xe5, 0x9d, 0x8e, 0x75, 0x9c, + 0xef, 0x0e, 0xbf, 0xe6, 0xcc, 0xae, 0xbd, 0xb3, 0x40, 0x28, 0xd9, 0x24, 0xbb, 0x3f, 0xe5, 0xfe, + 0xa8, 0x00, 0xe7, 0x33, 0xeb, 0xf5, 0x1d, 0x08, 0xa5, 0x7b, 0xed, 0x87, 0x99, 0x0f, 0xa0, 0x78, + 0x82, 0x06, 0x8b, 0x03, 0xfd, 0x72, 0x98, 0x83, 0x7d, 0xc4, 0x27, 0xc9, 0x1c, 0xb2, 0x77, 0x49, + 0x7c, 0x92, 0xcc, 0xbe, 0xe5, 0x3c, 0x45, 0xff, 0xa2, 0x90, 0xf3, 0x2d, 0xec, 0x51, 0x7a, 0x89, + 0x9e, 0x33, 0x0c, 0x18, 0xca, 0x87, 0x1e, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xb4, 0x08, 0x93, + 0x2d, 0xd7, 0xa3, 0x87, 0xcf, 0xbe, 0xc9, 0xf8, 0xa9, 0xf0, 0x51, 0x6b, 0x26, 0x18, 0x27, 0xf1, + 0x91, 0xab, 0xc5, 0x2e, 0x29, 0xe4, 0xa7, 0x11, 0xcf, 0xed, 0xed, 0x82, 0xa9, 0xff, 0x55, 0xa3, + 0x98, 0x11, 0xc7, 0x64, 0x4d, 0x93, 0x45, 0x14, 0xfb, 0x97, 0x45, 0x8c, 0x65, 0xcb, 0x21, 0xe6, + 0x5e, 0x81, 0xf1, 0x07, 0x16, 0x3e, 0xdb, 0x5f, 0x2d, 0xc2, 0xa3, 0x5d, 0xb6, 0x3d, 0x3f, 0xeb, + 0x8d, 0x39, 0xd0, 0xce, 0xfa, 0xd4, 0x3c, 0x54, 0xe1, 0xd4, 0x56, 0xa7, 0xd9, 0xdc, 0x67, 0x3e, + 0x01, 0xa4, 0x21, 0x31, 0x04, 0x4f, 0x29, 0x1f, 0xe0, 0xa7, 0x56, 0x33, 0x70, 0x70, 0x66, 0x4d, + 0xca, 0xd0, 0xd3, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0x30, 0xf4, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, + 0x85, 0x69, 0x67, 0xcf, 0x71, 0x79, 0x00, 0x58, 0x49, 0x80, 0x73, 0xf4, 0x4a, 0x66, 0xb8, 0x98, + 0x44, 0xc0, 0xe9, 0x3a, 0xe8, 0x35, 0x40, 0xfe, 0x26, 0xb3, 0xf6, 0x6d, 0x5c, 0x25, 0x9e, 0x50, + 0xd3, 0xb1, 0xb9, 0x2b, 0xc6, 0x47, 0xc2, 0xcd, 0x14, 0x06, 0xce, 0xa8, 0x95, 0x88, 0x05, 0x32, + 0x94, 0x1f, 0x0b, 0xa4, 0xfb, 0xb9, 0xd8, 0x33, 0x15, 0xc5, 0x7f, 0xb1, 0xe8, 0xf5, 0xc5, 0x99, + 0x7c, 0x33, 0xa4, 0xdd, 0x2b, 0xcc, 0xcc, 0x8e, 0xcb, 0x13, 0xb5, 0x08, 0x16, 0xa7, 0x35, 0x33, + 0xbb, 0x18, 0x88, 0x4d, 0x5c, 0xbe, 0x20, 0xc2, 0xd8, 0x71, 0xd2, 0x60, 0xf1, 0x45, 0xdc, 0x1d, + 0x85, 0x81, 0x3e, 0x09, 0xc3, 0x0d, 0x77, 0xcf, 0x0d, 0x85, 0x34, 0xe5, 0xc8, 0xaa, 0x8b, 0xf8, + 0x1c, 0x2c, 0x73, 0x32, 0x58, 0xd2, 0xb3, 0x7f, 0xa0, 0x00, 0xe3, 0xb2, 0xc5, 0xd7, 0x3b, 0x7e, + 0xe4, 0x9c, 0xc0, 0xb5, 0x7c, 0xd5, 0xb8, 0x96, 0x3f, 0xd0, 0x2d, 0xf8, 0x10, 0xeb, 0x52, 0xee, + 0x75, 0x7c, 0x33, 0x71, 0x1d, 0x3f, 0xd9, 0x9b, 0x54, 0xf7, 0x6b, 0xf8, 0x9f, 0x5a, 0x30, 0x6d, + 0xe0, 0x9f, 0xc0, 0x6d, 0xb0, 0x6a, 0xde, 0x06, 0x8f, 0xf7, 0xfc, 0x86, 0x9c, 0x5b, 0xe0, 0xbb, + 0x8b, 0x89, 0xbe, 0xb3, 0xd3, 0xff, 0x2d, 0x18, 0xd8, 0x71, 0x82, 0x46, 0xb7, 0x60, 0xeb, 0xa9, + 
0x4a, 0x0b, 0xd7, 0x9c, 0x40, 0xe8, 0x29, 0x9f, 0x51, 0x59, 0xbc, 0x9d, 0xa0, 0xb7, 0x8e, 0x92, + 0x35, 0x85, 0x5e, 0x82, 0xa1, 0xb0, 0xee, 0xb7, 0x95, 0x15, 0xff, 0x05, 0x9e, 0xe1, 0x9b, 0x96, + 0x1c, 0x1e, 0xcc, 0x23, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0xfa, 0x14, 0x8c, 0xb3, 0x5f, 0xca, + 0x68, 0xa8, 0x98, 0x9f, 0x98, 0xa9, 0xa6, 0x23, 0x72, 0x8b, 0x3a, 0xa3, 0x08, 0x9b, 0xa4, 0xe6, + 0xb6, 0xa1, 0xa4, 0x3e, 0xeb, 0xa1, 0xea, 0x06, 0xff, 0x63, 0x11, 0x66, 0x32, 0xd6, 0x1c, 0x0a, + 0x8d, 0x99, 0xb8, 0xd2, 0xe7, 0x52, 0x7d, 0x87, 0x73, 0x11, 0xb2, 0xd7, 0x50, 0x43, 0xac, 0xad, + 0xbe, 0x1b, 0xbd, 0x15, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x77, 0xa3, 0xb4, 0xb1, 0x13, 0x1b, 0x6a, + 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0x2d, 0xc2, 0xa9, 0xac, 0x78, 0x68, 0xe8, 0x73, + 0x89, 0x54, 0x7f, 0x2f, 0xf4, 0x1b, 0x49, 0x8d, 0xe7, 0xff, 0xe3, 0x32, 0xe0, 0xa5, 0x05, 0x33, + 0xf9, 0x5f, 0xcf, 0x61, 0x16, 0x6d, 0xb2, 0xa0, 0x00, 0x01, 0x4f, 0xd1, 0x28, 0x8f, 0x8f, 0x0f, + 0xf7, 0xdd, 0x01, 0x91, 0xdb, 0x31, 0x4c, 0x18, 0x24, 0xc8, 0xe2, 0xde, 0x06, 0x09, 0xb2, 0xe5, + 0x39, 0x17, 0x46, 0xb5, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4b, 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, + 0xd6, 0x7f, 0xd4, 0x82, 0x84, 0x8d, 0xba, 0x12, 0x8b, 0x59, 0xb9, 0x62, 0xb1, 0x0b, 0x30, 0x10, + 0xf8, 0x4d, 0x92, 0xcc, 0x89, 0x87, 0xfd, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xa2, 0x58, 0xd8, 0x31, + 0xa6, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x09, 0x18, 0x6c, 0x92, 0x3d, 0xd2, 0x4c, 0xa6, 0x2e, 0xb9, + 0x41, 0x0b, 0x31, 0x87, 0xd9, 0x3f, 0x3f, 0x00, 0xe7, 0xba, 0x86, 0xd5, 0xa0, 0xcf, 0xa1, 0x6d, + 0x27, 0x22, 0x77, 0x9d, 0xfd, 0x64, 0x8e, 0x81, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0xf3, 0x22, 0xe2, + 0xa1, 0x82, 0x13, 0x42, 0x44, 0x11, 0x21, 0x58, 0x40, 0x4d, 0xa1, 0x54, 0xf1, 0x38, 0x84, 0x52, + 0xcf, 0x01, 0x84, 0x61, 0x93, 0x5b, 0xf2, 0x34, 0x84, 0x7b, 0x52, 0x1c, 0x52, 0xba, 0x76, 0x43, + 0x40, 0xb0, 0x86, 0x85, 0xca, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xcb, 0x64, 0xcb, 0xdc, 0xd8, 0x6d, + 0xd0, 0x8c, 0x68, 0x50, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0x7a, 0x11, 0x46, 0x45, 0x94, 0x83, 0xaa, + 0xef, 0x37, 0x85, 0x18, 0x48, 0xd9, 0x7f, 0xd5, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0xd0, + 0x3b, 0x9c, 0x59, 0x8d, 0x0b, 0x7b, 0x35, 0xbc, 0x44, 0x6c, 0xc4, 0x91, 0xbe, 0x62, 0x23, 0xc6, + 0x82, 0xb1, 0x52, 0xdf, 0xba, 0x2d, 0xe8, 0x29, 0x4a, 0xfa, 0x99, 0x01, 0x98, 0x11, 0x0b, 0xe7, + 0x61, 0x2f, 0x97, 0x5b, 0xe9, 0xe5, 0x72, 0x1c, 0xa2, 0xb3, 0xf7, 0xd6, 0xcc, 0x49, 0xaf, 0x99, + 0x1f, 0xb4, 0xc0, 0x64, 0xaf, 0xd0, 0x5f, 0xca, 0x4d, 0xd2, 0xf2, 0x62, 0x2e, 0xbb, 0xd6, 0x90, + 0x17, 0xc8, 0x3b, 0x4c, 0xd7, 0x62, 0xff, 0x67, 0x0b, 0x1e, 0xef, 0x49, 0x11, 0xad, 0x40, 0x89, + 0xf1, 0x80, 0xda, 0xeb, 0xec, 0x49, 0x65, 0x0c, 0x2b, 0x01, 0x39, 0x2c, 0x69, 0x5c, 0x13, 0xad, + 0xa4, 0xb2, 0xe1, 0x3c, 0x95, 0x91, 0x0d, 0xe7, 0xb4, 0x31, 0x3c, 0x0f, 0x98, 0x0e, 0xe7, 0xfb, + 0xe9, 0x8d, 0x63, 0x38, 0xa2, 0xa0, 0x0f, 0x1b, 0x62, 0x3f, 0x3b, 0x21, 0xf6, 0x43, 0x26, 0xb6, + 0x76, 0x87, 0x7c, 0x1c, 0xa6, 0x58, 0xf8, 0x23, 0x66, 0x9a, 0x2d, 0x5c, 0x64, 0x0a, 0xb1, 0xf9, + 0xe5, 0x8d, 0x04, 0x0c, 0xa7, 0xb0, 0xed, 0x3f, 0x2c, 0xc2, 0x10, 0xdf, 0x7e, 0x27, 0xf0, 0x26, + 0x7c, 0x1a, 0x4a, 0x6e, 0xab, 0xd5, 0xe1, 0x09, 0x4e, 0x06, 0xb9, 0x5f, 0x2c, 0x9d, 0xa7, 0x8a, + 0x2c, 0xc4, 0x31, 0x1c, 0xad, 0x0a, 0x89, 0x73, 0x97, 0x08, 0x8b, 0xbc, 0xe3, 0x0b, 0x65, 0x27, + 0x72, 0x38, 0x83, 0xa3, 0xee, 0xd9, 0x58, 0x36, 0x8d, 0x3e, 0x03, 0x10, 0x46, 0x81, 0xeb, 0x6d, + 0xd3, 0x32, 0x11, 0x50, 0xf4, 0x83, 0x5d, 0xa8, 0xd5, 0x14, 0x32, 0xa7, 0x19, 0x9f, 0x39, 0x0a, + 0x80, 0x35, 0x8a, 0x68, 
0xc1, 0xb8, 0xe9, 0xe7, 0x12, 0x73, 0x07, 0x9c, 0x6a, 0x3c, 0x67, 0x73, + 0x1f, 0x81, 0x92, 0x22, 0xde, 0x4b, 0xfe, 0x34, 0xa6, 0xb3, 0x45, 0x1f, 0x83, 0xc9, 0x44, 0xdf, + 0x8e, 0x24, 0xbe, 0xfa, 0x05, 0x0b, 0x26, 0x79, 0x67, 0x56, 0xbc, 0x3d, 0x71, 0x1b, 0xbc, 0x0d, + 0xa7, 0x9a, 0x19, 0xa7, 0xb2, 0x98, 0xfe, 0xfe, 0x4f, 0x71, 0x25, 0xae, 0xca, 0x82, 0xe2, 0xcc, + 0x36, 0xd0, 0x25, 0xba, 0xe3, 0xe8, 0xa9, 0xeb, 0x34, 0x85, 0x9b, 0xec, 0x18, 0xdf, 0x6d, 0xbc, + 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0x5f, 0x27, 0xfb, 0xea, 0x6c, 0xfa, 0x7a, + 0xf6, 0x5d, 0xa4, 0xd6, 0x2a, 0xe4, 0xa4, 0xd6, 0xd2, 0x3f, 0xad, 0xd8, 0xf5, 0xd3, 0xbe, 0x6c, + 0x81, 0x58, 0x21, 0x27, 0x20, 0x84, 0xf8, 0x56, 0x53, 0x08, 0x31, 0x97, 0xbf, 0x09, 0x72, 0xa4, + 0x0f, 0x7f, 0x6e, 0xc1, 0x14, 0x47, 0x88, 0xb5, 0xe5, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0x01, 0xef, + 0x75, 0xb2, 0xbf, 0xe1, 0x57, 0x9d, 0x68, 0x27, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, + 0x0d, 0xb9, 0x81, 0x8e, 0x90, 0xd5, 0xfb, 0xc8, 0x99, 0x27, 0xec, 0xaf, 0x59, 0x80, 0x78, 0x33, + 0x06, 0xe3, 0x46, 0xd9, 0x21, 0x56, 0xaa, 0x5d, 0x74, 0xf1, 0xd1, 0xa4, 0x20, 0x58, 0xc3, 0x3a, + 0x96, 0xe1, 0x49, 0x98, 0x3c, 0x14, 0x7b, 0x9b, 0x3c, 0x1c, 0x61, 0x44, 0xff, 0xed, 0x10, 0x24, + 0x9d, 0x71, 0xd0, 0x6d, 0x18, 0xab, 0x3b, 0x6d, 0x67, 0xd3, 0x6d, 0xba, 0x91, 0x4b, 0xc2, 0x6e, + 0xb6, 0x52, 0xcb, 0x1a, 0x9e, 0x50, 0x52, 0x6b, 0x25, 0xd8, 0xa0, 0x83, 0x16, 0x00, 0xda, 0x81, + 0xbb, 0xe7, 0x36, 0xc9, 0x36, 0x93, 0x95, 0x30, 0xc7, 0x7c, 0x6e, 0x00, 0x24, 0x4b, 0xb1, 0x86, + 0x91, 0xe1, 0xf9, 0x5c, 0x7c, 0xc8, 0x9e, 0xcf, 0x70, 0x62, 0x9e, 0xcf, 0x03, 0x47, 0xf2, 0x7c, + 0x1e, 0x39, 0xb2, 0xe7, 0xf3, 0x60, 0x5f, 0x9e, 0xcf, 0x18, 0xce, 0x48, 0xde, 0x93, 0xfe, 0x5f, + 0x75, 0x9b, 0x44, 0x3c, 0x38, 0x78, 0x34, 0x81, 0xb9, 0xfb, 0x07, 0xf3, 0x67, 0x70, 0x26, 0x06, + 0xce, 0xa9, 0x89, 0x3e, 0x01, 0xb3, 0x4e, 0xb3, 0xe9, 0xdf, 0x55, 0x93, 0xba, 0x12, 0xd6, 0x9d, + 0x26, 0x57, 0x42, 0x0c, 0x33, 0xaa, 0x8f, 0xdd, 0x3f, 0x98, 0x9f, 0x5d, 0xcc, 0xc1, 0xc1, 0xb9, + 0xb5, 0xd1, 0x47, 0xa1, 0xd4, 0x0e, 0xfc, 0xfa, 0x9a, 0xe6, 0x31, 0x78, 0x9e, 0x0e, 0x60, 0x55, + 0x16, 0x1e, 0x1e, 0xcc, 0x8f, 0xab, 0x3f, 0xec, 0xc2, 0x8f, 0x2b, 0x64, 0xb8, 0x32, 0x8f, 0x1e, + 0xab, 0x2b, 0xf3, 0x2e, 0xcc, 0xd4, 0x48, 0xe0, 0xb2, 0x1c, 0xe0, 0x8d, 0xf8, 0x7c, 0xda, 0x80, + 0x52, 0x90, 0x38, 0x91, 0xfb, 0x8a, 0x7a, 0xa8, 0xa5, 0x00, 0x90, 0x27, 0x70, 0x4c, 0xc8, 0xfe, + 0x3f, 0x16, 0x0c, 0x0b, 0xe7, 0x9b, 0x13, 0xe0, 0x1a, 0x17, 0x0d, 0x4d, 0xc2, 0x7c, 0xf6, 0x80, + 0xb1, 0xce, 0xe4, 0xea, 0x10, 0x2a, 0x09, 0x1d, 0xc2, 0xe3, 0xdd, 0x88, 0x74, 0xd7, 0x1e, 0xfc, + 0xcd, 0x22, 0xe5, 0xde, 0x0d, 0x37, 0xd0, 0x87, 0x3f, 0x04, 0xeb, 0x30, 0x1c, 0x0a, 0x37, 0xc4, + 0x42, 0xbe, 0xdd, 0x7c, 0x72, 0x12, 0x63, 0x3b, 0x36, 0xe1, 0x78, 0x28, 0x89, 0x64, 0xfa, 0x37, + 0x16, 0x1f, 0xa2, 0x7f, 0x63, 0x2f, 0x47, 0xd9, 0x81, 0xe3, 0x70, 0x94, 0xb5, 0xbf, 0xc2, 0x6e, + 0x4e, 0xbd, 0xfc, 0x04, 0x98, 0xaa, 0xab, 0xe6, 0x1d, 0x6b, 0x77, 0x59, 0x59, 0xa2, 0x53, 0x39, + 0xcc, 0xd5, 0xcf, 0x59, 0x70, 0x2e, 0xe3, 0xab, 0x34, 0x4e, 0xeb, 0x19, 0x18, 0x71, 0x3a, 0x0d, + 0x57, 0xed, 0x65, 0x4d, 0x9f, 0xb8, 0x28, 0xca, 0xb1, 0xc2, 0x40, 0xcb, 0x30, 0x4d, 0xee, 0xb5, + 0x5d, 0xae, 0x4a, 0xd5, 0x8d, 0x4d, 0x8b, 0xdc, 0x63, 0x6b, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0x15, + 0x9c, 0xa4, 0x98, 0x1b, 0x9c, 0xe4, 0x1f, 0x58, 0x30, 0xaa, 0x1c, 0xf1, 0x1e, 0xfa, 0x68, 0x7f, + 0xdc, 0x1c, 0xed, 0x47, 0xbb, 0x8c, 0x76, 0xce, 0x30, 0xff, 0x56, 0x41, 0xf5, 0xb7, 0xea, 0x07, + 0x51, 0x1f, 0x1c, 0xdc, 0x83, 0x9b, 0xc7, 0x5f, 
0x81, 0x51, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x83, + 0xc6, 0x62, 0xd8, 0xc6, 0xc5, 0x58, 0xc7, 0x51, 0xd6, 0xfa, 0xc5, 0x5c, 0x6b, 0xfd, 0x06, 0x40, + 0xe4, 0x04, 0xdb, 0x24, 0xa2, 0x65, 0x22, 0x90, 0x58, 0xfe, 0x79, 0xd3, 0x89, 0xdc, 0xe6, 0x82, + 0xeb, 0x45, 0x61, 0x14, 0x2c, 0x54, 0xbc, 0xe8, 0x66, 0xc0, 0x9f, 0x90, 0x5a, 0xa4, 0x1e, 0x45, + 0x0b, 0x6b, 0x74, 0xa5, 0xd3, 0x39, 0x6b, 0x63, 0xd0, 0x34, 0x66, 0x58, 0x17, 0xe5, 0x58, 0x61, + 0xd8, 0x1f, 0x61, 0xb7, 0x0f, 0x1b, 0xd3, 0xa3, 0x85, 0xb6, 0xf9, 0xf2, 0xa8, 0x9a, 0x0d, 0xa6, + 0xc9, 0x2c, 0xeb, 0x01, 0x74, 0xba, 0x1f, 0xf6, 0xb4, 0x61, 0xdd, 0x77, 0x2c, 0x8e, 0xb2, 0x83, + 0xbe, 0x2d, 0x65, 0xa0, 0xf2, 0x6c, 0x8f, 0x5b, 0xe3, 0x08, 0x26, 0x29, 0x2c, 0xa1, 0x05, 0x0b, + 0xf7, 0x5f, 0xa9, 0x8a, 0x7d, 0xa1, 0x25, 0xb4, 0x10, 0x00, 0x1c, 0xe3, 0x50, 0x66, 0x4a, 0xfd, + 0x09, 0x67, 0x51, 0x1c, 0xd8, 0x51, 0x61, 0x87, 0x58, 0xc3, 0x40, 0x97, 0x85, 0x40, 0x81, 0xeb, + 0x05, 0x1e, 0x4d, 0x08, 0x14, 0xe4, 0x70, 0x69, 0x52, 0xa0, 0x2b, 0x30, 0xaa, 0x72, 0xda, 0x56, + 0x79, 0xaa, 0x54, 0xb1, 0xcc, 0x56, 0xe2, 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0c, 0xb9, 0x9c, + 0x4d, 0x45, 0xdb, 0xe5, 0xf2, 0xca, 0x0f, 0x4a, 0x2b, 0xa0, 0x9a, 0x09, 0x3e, 0x64, 0x45, 0xfc, + 0x74, 0x92, 0x8e, 0xe1, 0x49, 0x12, 0xe8, 0x55, 0x98, 0x68, 0xfa, 0x4e, 0x63, 0xc9, 0x69, 0x3a, + 0x5e, 0x9d, 0x8d, 0xcf, 0x88, 0x99, 0x1a, 0xf1, 0x86, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0xbc, 0xe9, + 0x25, 0x22, 0x42, 0xb4, 0xe3, 0x6d, 0x93, 0x50, 0x64, 0x28, 0x65, 0xcc, 0xdb, 0x8d, 0x1c, 0x1c, + 0x9c, 0x5b, 0x1b, 0xbd, 0x04, 0x63, 0xf2, 0xf3, 0xb5, 0x38, 0x0a, 0xb1, 0xe3, 0x83, 0x06, 0xc3, + 0x06, 0x26, 0xba, 0x0b, 0xa7, 0xe5, 0xff, 0x8d, 0xc0, 0xd9, 0xda, 0x72, 0xeb, 0xc2, 0xb9, 0x98, + 0x7b, 0x48, 0x2e, 0x4a, 0x37, 0xbe, 0x95, 0x2c, 0xa4, 0xc3, 0x83, 0xf9, 0x0b, 0x62, 0xd4, 0x32, + 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x35, 0x98, 0xd9, 0x21, 0x4e, 0x33, 0xda, 0x59, 0xde, 0x21, + 0xf5, 0x5d, 0xb9, 0xe9, 0x58, 0x74, 0x06, 0xcd, 0x5d, 0xe0, 0x5a, 0x1a, 0x05, 0x67, 0xd5, 0x43, + 0x6f, 0xc0, 0x6c, 0xbb, 0xb3, 0xd9, 0x74, 0xc3, 0x9d, 0x75, 0x3f, 0x62, 0xa6, 0x40, 0x2a, 0x45, + 0xae, 0x08, 0xe3, 0xa0, 0xe2, 0x5f, 0x54, 0x73, 0xf0, 0x70, 0x2e, 0x05, 0xf4, 0x36, 0x9c, 0x4e, + 0x2c, 0x06, 0xe1, 0xc8, 0x3e, 0x91, 0x1f, 0x6f, 0xbf, 0x96, 0x55, 0x41, 0xc4, 0x84, 0xc8, 0x02, + 0xe1, 0xec, 0x26, 0xe8, 0xe3, 0x43, 0x0b, 0x70, 0x1a, 0xce, 0x4e, 0xc5, 0x36, 0xcb, 0x5a, 0x14, + 0xd4, 0x10, 0x1b, 0x58, 0xe8, 0x65, 0x00, 0xb7, 0xbd, 0xea, 0xb4, 0xdc, 0x26, 0x7d, 0x64, 0xce, + 0xb0, 0x3a, 0xf4, 0xc1, 0x01, 0x95, 0xaa, 0x2c, 0xa5, 0xa7, 0xba, 0xf8, 0xb7, 0x8f, 0x35, 0x6c, + 0x54, 0x85, 0x09, 0xf1, 0x6f, 0x5f, 0x2c, 0x86, 0x69, 0xe5, 0x69, 0x3e, 0x21, 0x6b, 0xa8, 0x15, + 0x80, 0xcc, 0x12, 0x36, 0xe7, 0x89, 0xfa, 0x68, 0x1b, 0xce, 0x89, 0x1c, 0xcc, 0x44, 0x5f, 0xdd, + 0x72, 0xf6, 0x42, 0x16, 0x1e, 0x7f, 0x84, 0x07, 0x90, 0x59, 0xec, 0x86, 0x88, 0xbb, 0xd3, 0x79, + 0x67, 0x16, 0x70, 0xbf, 0x6d, 0xd1, 0xda, 0x1a, 0x97, 0x8c, 0x3e, 0x0b, 0x63, 0xfa, 0x9e, 0x13, + 0x37, 0xfe, 0xc5, 0x6c, 0x26, 0x52, 0xdb, 0x9b, 0x9c, 0xc7, 0x56, 0xfb, 0x4f, 0x87, 0x61, 0x83, + 0x22, 0xaa, 0x67, 0xb8, 0x51, 0x5f, 0xee, 0x8f, 0xa3, 0xe8, 0xdf, 0x00, 0x8c, 0x40, 0xf6, 0x92, + 0x43, 0x37, 0x60, 0xa4, 0xde, 0x74, 0x89, 0x17, 0x55, 0xaa, 0xdd, 0x02, 0x9f, 0x2d, 0x0b, 0x1c, + 0xb1, 0x86, 0x45, 0xcc, 0x78, 0x5e, 0x86, 0x15, 0x05, 0xfb, 0x57, 0x0b, 0x30, 0xdf, 0x23, 0x01, + 0x41, 0x42, 0x1d, 0x64, 0xf5, 0xa5, 0x0e, 0x5a, 0x94, 0x39, 0x98, 0xd7, 0x13, 0x92, 0xa6, 0x44, + 0x7e, 0xe5, 0x58, 0xde, 0x94, 0xc4, 0xef, 0xdb, 0x3c, 0x5f, 0xd7, 0x28, 
0x0d, 0xf4, 0x74, 0x30, + 0x31, 0x34, 0xc9, 0x83, 0xfd, 0x3f, 0x3f, 0x73, 0xb5, 0x82, 0xf6, 0x57, 0x0a, 0x70, 0x5a, 0x0d, + 0xe1, 0x37, 0xef, 0xc0, 0xdd, 0x4a, 0x0f, 0xdc, 0x31, 0xe8, 0x54, 0xed, 0x9b, 0x30, 0xc4, 0x23, + 0xb9, 0xf5, 0xc1, 0xf6, 0x3e, 0x61, 0x06, 0x3d, 0x55, 0x9c, 0x96, 0x11, 0xf8, 0xf4, 0x7b, 0x2d, + 0x98, 0xdc, 0x58, 0xae, 0xd6, 0xfc, 0xfa, 0x2e, 0x89, 0x16, 0xf9, 0x33, 0x05, 0x6b, 0x0e, 0xa7, + 0x0f, 0xc2, 0x9a, 0x66, 0x31, 0xbd, 0x17, 0x60, 0x60, 0xc7, 0x0f, 0xa3, 0xa4, 0xc1, 0xc5, 0x35, + 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xbf, 0x6b, 0xc1, 0xe0, 0x86, 0xe3, 0x7a, 0x91, 0x14, 0xce, 0x5b, + 0x39, 0xc2, 0xf9, 0x7e, 0xbe, 0x0b, 0xbd, 0x08, 0x43, 0x64, 0x6b, 0x8b, 0xd4, 0x23, 0x31, 0xab, + 0xd2, 0x5b, 0x7f, 0x68, 0x85, 0x95, 0x52, 0x3e, 0x8c, 0x35, 0xc6, 0xff, 0x62, 0x81, 0x8c, 0xee, + 0x40, 0x29, 0x72, 0x5b, 0x64, 0xb1, 0xd1, 0x10, 0x2a, 0xeb, 0x07, 0x88, 0x38, 0xb0, 0x21, 0x09, + 0xe0, 0x98, 0x96, 0xfd, 0xc5, 0x02, 0x40, 0x1c, 0x02, 0xa7, 0xd7, 0x27, 0x2e, 0xa5, 0x94, 0x99, + 0x17, 0x33, 0x94, 0x99, 0x28, 0x26, 0x98, 0xa1, 0xc9, 0x54, 0xc3, 0x54, 0xec, 0x6b, 0x98, 0x06, + 0x8e, 0x32, 0x4c, 0xcb, 0x30, 0x1d, 0x87, 0xf0, 0x31, 0x23, 0x98, 0xb1, 0xa7, 0xe9, 0x46, 0x12, + 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x0b, 0x2a, 0x92, 0x89, 0xb8, 0xd1, 0x98, 0x45, 0xb4, 0xae, 0x1c, + 0xee, 0x31, 0x4e, 0xb1, 0xb6, 0xb6, 0x90, 0xab, 0xad, 0xfd, 0x09, 0x0b, 0x4e, 0x25, 0xdb, 0x61, + 0x2e, 0xaa, 0x5f, 0xb0, 0xe0, 0x34, 0xd3, 0x59, 0xb3, 0x56, 0xd3, 0x1a, 0xf2, 0x17, 0xba, 0x46, + 0x67, 0xc9, 0xe9, 0x71, 0x1c, 0x16, 0x62, 0x2d, 0x8b, 0x34, 0xce, 0x6e, 0xd1, 0xfe, 0x4f, 0x05, + 0x98, 0xcd, 0x0b, 0xeb, 0xc2, 0x1c, 0x26, 0x9c, 0x7b, 0xb5, 0x5d, 0x72, 0x57, 0x98, 0xa5, 0xc7, + 0x0e, 0x13, 0xbc, 0x18, 0x4b, 0x78, 0x32, 0xa6, 0x7c, 0xa1, 0xbf, 0x98, 0xf2, 0x68, 0x07, 0xa6, + 0xef, 0xee, 0x10, 0xef, 0x96, 0x17, 0x3a, 0x91, 0x1b, 0x6e, 0xb9, 0x4c, 0xbf, 0xcb, 0xd7, 0xcd, + 0xcb, 0xd2, 0x78, 0xfc, 0x4e, 0x12, 0xe1, 0xf0, 0x60, 0xfe, 0x9c, 0x51, 0x10, 0x77, 0x99, 0x1f, + 0x24, 0x38, 0x4d, 0x34, 0x1d, 0x92, 0x7f, 0xe0, 0x21, 0x86, 0xe4, 0xb7, 0xbf, 0x60, 0xc1, 0xd9, + 0xdc, 0x3c, 0xa0, 0xe8, 0x12, 0x8c, 0x38, 0x6d, 0x97, 0x8b, 0xc8, 0xc5, 0x31, 0xca, 0x44, 0x31, + 0xd5, 0x0a, 0x17, 0x90, 0x2b, 0xa8, 0xca, 0x4f, 0x5e, 0xc8, 0xcd, 0x4f, 0xde, 0x33, 0xdd, 0xb8, + 0xfd, 0x3d, 0x16, 0x08, 0x67, 0xcf, 0x3e, 0xce, 0xee, 0x4f, 0xc1, 0xd8, 0x5e, 0x3a, 0x6d, 0xcf, + 0x85, 0x7c, 0xef, 0x57, 0x91, 0xac, 0x47, 0x31, 0x64, 0x46, 0x8a, 0x1e, 0x83, 0x96, 0xdd, 0x00, + 0x01, 0x2d, 0x13, 0x26, 0x00, 0xee, 0xdd, 0x9b, 0xe7, 0x00, 0x1a, 0x0c, 0x57, 0x4b, 0xf2, 0xae, + 0x6e, 0xe6, 0xb2, 0x82, 0x60, 0x0d, 0xcb, 0xfe, 0xf7, 0x05, 0x18, 0x95, 0x69, 0x62, 0x3a, 0x5e, + 0x3f, 0x62, 0x9a, 0x23, 0xe5, 0x8d, 0xa4, 0xaf, 0x78, 0x26, 0x47, 0xac, 0xc6, 0xd2, 0x2d, 0xf5, + 0x8a, 0x5f, 0x93, 0x00, 0x1c, 0xe3, 0xd0, 0x5d, 0x14, 0x76, 0x36, 0x19, 0x7a, 0xc2, 0x35, 0xb1, + 0xc6, 0x8b, 0xb1, 0x84, 0xa3, 0x4f, 0xc0, 0x14, 0xaf, 0x17, 0xf8, 0x6d, 0x67, 0x9b, 0xeb, 0x1e, + 0x06, 0x55, 0x4c, 0x81, 0xa9, 0xb5, 0x04, 0xec, 0xf0, 0x60, 0xfe, 0x54, 0xb2, 0x8c, 0x29, 0xd5, + 0x52, 0x54, 0x98, 0x89, 0x11, 0x6f, 0x84, 0xee, 0xfe, 0x94, 0x65, 0x52, 0x0c, 0xc2, 0x3a, 0x9e, + 0xfd, 0x59, 0x40, 0xe9, 0x84, 0x39, 0xe8, 0x35, 0x6e, 0x57, 0xea, 0x06, 0xa4, 0xd1, 0x4d, 0xc9, + 0xa6, 0x7b, 0xce, 0x4b, 0xaf, 0x22, 0x5e, 0x0b, 0xab, 0xfa, 0xf6, 0x5f, 0x2d, 0xc2, 0x54, 0xd2, + 0x8f, 0x1a, 0x5d, 0x83, 0x21, 0xce, 0x7a, 0x08, 0xf2, 0x5d, 0x6c, 0x38, 0x34, 0xef, 0x6b, 0x76, + 0x08, 0x0b, 0xee, 0x45, 0xd4, 0x47, 0x6f, 0xc0, 0x68, 0xc3, 0xbf, 0xeb, 0xdd, 0x75, 0x82, 0xc6, + 
0x62, 0xb5, 0x22, 0x96, 0x73, 0xe6, 0xa3, 0xb2, 0x1c, 0xa3, 0xe9, 0x1e, 0xdd, 0x4c, 0x5f, 0x19, + 0x83, 0xb0, 0x4e, 0x0e, 0x6d, 0xb0, 0xf8, 0xde, 0x5b, 0xee, 0xf6, 0x9a, 0xd3, 0xee, 0xe6, 0x64, + 0xb0, 0x2c, 0x91, 0x34, 0xca, 0xe3, 0x22, 0x08, 0x38, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x1c, 0xcc, + 0x84, 0x39, 0xa2, 0xee, 0xbc, 0xfc, 0x69, 0xdd, 0xa4, 0xbf, 0x4b, 0x8f, 0xd0, 0xe7, 0x7e, 0x96, + 0x50, 0x3c, 0xab, 0x19, 0xfb, 0x47, 0x4e, 0x81, 0xb1, 0x89, 0x8d, 0x74, 0x9a, 0xd6, 0x31, 0xa5, + 0xd3, 0xc4, 0x30, 0x42, 0x5a, 0xed, 0x68, 0xbf, 0xec, 0x06, 0xdd, 0x92, 0x4c, 0xaf, 0x08, 0x9c, + 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0xce, 0xd3, 0xe2, 0xd7, 0x31, 0xe7, 0xe9, 0xc0, 0x09, + 0xe6, 0x3c, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xed, 0x0b, 0xa6, 0x3f, 0x73, 0x1d, 0x5e, + 0xe5, 0x28, 0xe9, 0xec, 0x7a, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x9a, 0xda, 0x81, 0x43, 0xf9, 0x0f, + 0xf3, 0xb4, 0xb1, 0x41, 0xe6, 0x1e, 0x14, 0x99, 0x4d, 0x87, 0x1f, 0x34, 0xb3, 0xe9, 0xaa, 0xcc, + 0x47, 0x3a, 0x92, 0xef, 0x11, 0xc4, 0xd2, 0x8d, 0xf6, 0xc8, 0x42, 0x7a, 0x5b, 0xcf, 0xe1, 0x5a, + 0xca, 0x3f, 0x09, 0x54, 0x7a, 0xd6, 0x3e, 0x33, 0xb7, 0x7e, 0x8f, 0x05, 0xa7, 0xdb, 0x59, 0xe9, + 0x8c, 0x85, 0x5e, 0xfe, 0xc5, 0xbe, 0x33, 0x26, 0x1b, 0x0d, 0x32, 0x79, 0x56, 0x26, 0x1a, 0xce, + 0x6e, 0x8e, 0x0e, 0x74, 0xb0, 0xd9, 0x10, 0xfa, 0xe1, 0x27, 0x72, 0x52, 0xc0, 0x76, 0x49, 0xfc, + 0xba, 0x91, 0x91, 0x6e, 0xf4, 0xfd, 0x79, 0xe9, 0x46, 0xfb, 0x4e, 0x32, 0xfa, 0x9a, 0x4a, 0xfe, + 0x3a, 0x9e, 0xbf, 0x94, 0x78, 0x6a, 0xd7, 0x9e, 0x29, 0x5f, 0x5f, 0x53, 0x29, 0x5f, 0xbb, 0x04, + 0x6f, 0xe5, 0x09, 0x5d, 0x7b, 0x26, 0x7a, 0xd5, 0x92, 0xb5, 0x4e, 0x1e, 0x4f, 0xb2, 0x56, 0xe3, + 0xaa, 0xe1, 0xf9, 0x42, 0x9f, 0xee, 0x71, 0xd5, 0x18, 0x74, 0xbb, 0x5f, 0x36, 0x3c, 0x31, 0xed, + 0xf4, 0x03, 0x25, 0xa6, 0xbd, 0xad, 0x27, 0x7a, 0x45, 0x3d, 0x32, 0x99, 0x52, 0xa4, 0x3e, 0xd3, + 0xbb, 0xde, 0xd6, 0x2f, 0xc0, 0x99, 0x7c, 0xba, 0xea, 0x9e, 0x4b, 0xd3, 0xcd, 0xbc, 0x02, 0x53, + 0x69, 0x63, 0x4f, 0x9d, 0x4c, 0xda, 0xd8, 0xd3, 0xc7, 0x9e, 0x36, 0xf6, 0xcc, 0x09, 0xa4, 0x8d, + 0x7d, 0xe4, 0x04, 0xd3, 0xc6, 0xde, 0x66, 0xc6, 0x2c, 0x3c, 0x64, 0x8e, 0x08, 0x36, 0x9b, 0x1d, + 0xd8, 0x34, 0x2b, 0xae, 0x0e, 0xff, 0x38, 0x05, 0xc2, 0x31, 0xa9, 0x8c, 0x74, 0xb4, 0xb3, 0x0f, + 0x21, 0x1d, 0xed, 0x7a, 0x9c, 0x8e, 0xf6, 0x6c, 0xfe, 0x54, 0x67, 0xb8, 0x3f, 0xe4, 0x24, 0xa1, + 0xbd, 0xad, 0x27, 0x8f, 0x7d, 0xb4, 0x8b, 0xc6, 0x22, 0x4b, 0xf0, 0xd8, 0x25, 0x65, 0xec, 0xab, + 0x3c, 0x65, 0xec, 0x63, 0xf9, 0x27, 0x79, 0xf2, 0xba, 0x33, 0x12, 0xc5, 0xd2, 0x7e, 0xa9, 0xb0, + 0x86, 0x2c, 0xac, 0x6e, 0x4e, 0xbf, 0x54, 0x5c, 0xc4, 0x74, 0xbf, 0x14, 0x08, 0xc7, 0xa4, 0xec, + 0xef, 0x2b, 0xc0, 0xf9, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x63, 0x05, 0x6e, 0x42, 0x9a, 0xca, + 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0x51, 0xda, 0xae, 0xc2, 0xb4, 0xf2, 0x9b, 0x68, 0xba, 0xf5, 0xfd, + 0xf5, 0xf8, 0xe5, 0xab, 0x7c, 0xcd, 0x6b, 0x49, 0x04, 0x9c, 0xae, 0x83, 0x16, 0x61, 0xd2, 0x28, + 0xac, 0x94, 0xc5, 0xdb, 0x4c, 0x89, 0x6f, 0x6b, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x2f, 0x59, 0xf0, + 0x48, 0x4e, 0xa6, 0xb7, 0xbe, 0x83, 0x90, 0x6d, 0xc1, 0x64, 0xdb, 0xac, 0xda, 0x23, 0x6e, 0xa2, + 0x91, 0x4f, 0x4e, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0xf6, 0x9f, 0x59, 0x70, 0xae, 0xab, 0x21, + 0x20, 0xc2, 0x70, 0x66, 0xbb, 0x15, 0x3a, 0xcb, 0x01, 0x69, 0x10, 0x2f, 0x72, 0x9d, 0x66, 0xad, + 0x4d, 0xea, 0x9a, 0x3c, 0x9c, 0x59, 0xd4, 0x5d, 0x5d, 0xab, 0x2d, 0xa6, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x02, 0x34, 0xa7, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x23, + 0x30, 0xae, 0x0c, 0x0c, 
0xb5, 0x19, 0x67, 0x07, 0x3b, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf4, + 0xeb, 0xbf, 0x7f, 0xfe, 0x7d, 0xbf, 0xf9, 0xfb, 0xe7, 0xdf, 0xf7, 0x3b, 0xbf, 0x7f, 0xfe, 0x7d, + 0xdf, 0x71, 0xff, 0xbc, 0xf5, 0xeb, 0xf7, 0xcf, 0x5b, 0xbf, 0x79, 0xff, 0xbc, 0xf5, 0x3b, 0xf7, + 0xcf, 0x5b, 0xbf, 0x77, 0xff, 0xbc, 0xf5, 0xc5, 0x3f, 0x38, 0xff, 0xbe, 0x4f, 0x15, 0xf6, 0xae, + 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xf2, 0xaa, 0x3b, 0x4d, 0x03, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -11552,6 +11587,20 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -15561,6 +15610,44 @@ func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -17949,6 +18036,49 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AllocateLoadBalancerNodePorts != nil { + i-- + if *m.AllocateLoadBalancerNodePorts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.IPFamilies) > 0 { + for iNdEx := len(m.IPFamilies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPFamilies[iNdEx]) + copy(dAtA[i:], m.IPFamilies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPFamilies[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.ClusterIPs) > 0 { + for iNdEx := len(m.ClusterIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIPs[iNdEx]) + copy(dAtA[i:], m.ClusterIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIPs[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.IPFamilyPolicy != nil { + i -= len(*m.IPFamilyPolicy) + copy(dAtA[i:], *m.IPFamilyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamilyPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } if len(m.TopologyKeys) > 0 { for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.TopologyKeys[iNdEx]) @@ -17960,13 +18090,6 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x82 } } - if m.IPFamily != nil { - i -= len(*m.IPFamily) - copy(dAtA[i:], 
*m.IPFamily) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) - i-- - dAtA[i] = 0x7a - } if m.SessionAffinityConfig != nil { { size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -18099,6 +18222,20 @@ func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -21043,6 +21180,12 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22499,6 +22642,22 @@ func (m *PodTemplateSpec) Size() (n int) { return n } +func (m *PortStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PortworxVolumeSource) Size() (n int) { if m == nil { return 0 @@ -23405,16 +23564,31 @@ func (m *ServiceSpec) Size() (n int) { l = m.SessionAffinityConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.IPFamily != nil { - l = len(*m.IPFamily) - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.TopologyKeys) > 0 { for _, s := range m.TopologyKeys { l = len(s) n += 2 + l + sovGenerated(uint64(l)) } } + if m.IPFamilyPolicy != nil { + l = len(*m.IPFamilyPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.ClusterIPs) > 0 { + for _, s := range m.ClusterIPs { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.IPFamilies) > 0 { + for _, s := range m.IPFamilies { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AllocateLoadBalancerNodePorts != nil { + n += 3 + } return n } @@ -23426,6 +23600,12 @@ func (m *ServiceStatus) Size() (n int) { _ = l l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -25156,9 +25336,15 @@ func (this *LoadBalancerIngress) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]PortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "PortStatus", "PortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") return s @@ -26257,6 +26443,18 @@ func (this *PodTemplateSpec) String() string { }, "") return s } +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *PortworxVolumeSource) String() string { if this == nil { return "nil" @@ -26979,8 +27177,11 @@ func (this 
*ServiceSpec) String() string { `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`, + `ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`, + `IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`, + `AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`, `}`, }, "") return s @@ -26989,8 +27190,14 @@ func (this *ServiceStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ServiceStatus{`, `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -42189,13 +42396,45 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -42205,23 +42444,25 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Ports = append(m.Ports, PortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -54597,17 +54838,256 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + return 
fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -54617,117 +55097,16 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -54737,30 +55116,29 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -54770,24 +55148,24 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -63206,9 +63584,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 15: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63236,12 +63614,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := IPFamily(dAtA[iNdEx:postIndex]) - m.IPFamily = &s + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 16: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType) } var stringLen uint64 for 
shift := uint(0); ; shift += 7 { @@ -63269,8 +63646,94 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + s := IPFamilyPolicyType(dAtA[iNdEx:postIndex]) + m.IPFamilyPolicy = &s + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIPs = append(m.ClusterIPs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPFamilies = append(m.IPFamilies, IPFamily(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocateLoadBalancerNodePorts", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllocateLoadBalancerNodePorts = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -63357,6 +63820,40 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 916e2601e..3a13c53fa 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. 
Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.core.v1; @@ -718,7 +718,6 @@ message Container { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe startupProbe = 22; @@ -817,6 +816,7 @@ message ContainerPort { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" optional string protocol = 4; // What host IP to bind the external port to. @@ -1404,7 +1404,12 @@ message EphemeralVolumeSource { optional bool readOnly = 2; } -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -2032,6 +2037,12 @@ message LoadBalancerIngress { // (typically AWS load-balancers) // +optional optional string hostname = 2; + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + repeated PortStatus ports = 4; } // LoadBalancerStatus represents the status of a load-balancer. @@ -2677,17 +2688,13 @@ message PersistentVolumeClaimSpec { optional string volumeMode = 6; // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional optional TypedLocalObjectReference dataSource = 7; } @@ -3340,7 +3347,7 @@ message PodSecurityContext { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". 
If not specified, "Always" is used. // +optional optional string fsGroupChangePolicy = 9; @@ -3765,6 +3772,29 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +message PortStatus { + // Port is the port number of the service port of which status is recorded here + optional int32 port = 1; + + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // PortworxVolumeSource represents a Portworx volume resource. message PortworxVolumeSource { // VolumeID uniquely identifies a Portworx volume @@ -3853,6 +3883,7 @@ message Probe { // Represents a projected volume source message ProjectedVolumeSource { // list of volume projections + // +optional repeated VolumeProjection sources = 1; // Mode bits used to set permissions on created files by default. @@ -4750,10 +4781,14 @@ message ServicePort { // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional optional int32 nodePort = 5; @@ -4791,30 +4826,68 @@ message ServiceSpec { map selector = 2; // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. 
If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional optional string clusterIP = 3; + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. + // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + repeated string clusterIPs = 18; + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. 
If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional optional string type = 4; @@ -4850,10 +4923,10 @@ message ServiceSpec { // +optional repeated string loadBalancerSourceRanges = 9; - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional optional string externalName = 10; @@ -4867,10 +4940,14 @@ message ServiceSpec { optional string externalTrafficPolicy = 11; // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). // +optional optional int32 healthCheckNodePort = 12; @@ -4889,24 +4966,6 @@ message ServiceSpec { // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. 
In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - optional string ipFamily = 15; - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4919,8 +4978,51 @@ message ServiceSpec { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. + // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional repeated string topologyKeys = 16; + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. + // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + repeated string ipFamilies = 19; + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). 
The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + optional string ipFamilyPolicy = 17; + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + optional bool allocateLoadBalancerNodePorts = 20; } // ServiceStatus represents the current status of a service. @@ -4929,6 +5031,14 @@ message ServiceStatus { // if one is present. // +optional optional LoadBalancerStatus loadBalancer = 1; + + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go index 5bc9cd5bf..4e249d03a 100644 --- a/vendor/k8s.io/api/core/v1/resource.go +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -21,44 +21,39 @@ import ( ) // Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) +func (rn ResourceName) String() string { + return string(rn) } -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} +// Cpu returns the Cpu limit if specified. +func (rl *ResourceList) Cpu() *resource.Quantity { + return rl.Name(ResourceCPU, resource.DecimalSI) } -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Memory returns the Memory limit if specified. +func (rl *ResourceList) Memory() *resource.Quantity { + return rl.Name(ResourceMemory, resource.BinarySI) } -// Returns the Storage limit if specified. -func (self *ResourceList) Storage() *resource.Quantity { - if val, ok := (*self)[ResourceStorage]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Storage returns the Storage limit if specified. +func (rl *ResourceList) Storage() *resource.Quantity { + return rl.Name(ResourceStorage, resource.BinarySI) } -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} +// Pods returns the list of pods +func (rl *ResourceList) Pods() *resource.Quantity { + return rl.Name(ResourcePods, resource.DecimalSI) +} + +// StorageEphemeral returns the list of ephemeral storage volumes, if any +func (rl *ResourceList) StorageEphemeral() *resource.Quantity { + return rl.Name(ResourceEphemeralStorage, resource.BinarySI) } -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { +// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format. 
+func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity { + if val, ok := (*rl)[name]; ok { return &val } - return &resource.Quantity{} + return &resource.Quantity{Format: defaultFormat} } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index f3ec52e71..2bba97251 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -489,17 +489,13 @@ type PersistentVolumeClaimSpec struct { // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` } @@ -1615,6 +1611,7 @@ type ServiceAccountTokenProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections + // +optional Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` // Mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1840,6 +1837,7 @@ type ContainerPort struct { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` // What host IP to bind the external port to. // +optional @@ -2287,7 +2285,6 @@ type Container struct { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` @@ -3298,7 +3295,7 @@ type PodSecurityContext struct { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". 
If not specified, "Always" is used. // +optional FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` // The seccomp options to use by the containers in this pod. @@ -3943,12 +3940,26 @@ const ( ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" ) +// These are the valid conditions of a service. +const ( + // LoadBalancerPortsError represents the condition of the requested ports + // on the cloud load balancer instance. + LoadBalancerPortsError = "LoadBalancerPortsError" +) + // ServiceStatus represents the current status of a service. type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, // if one is present. // +optional LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // LoadBalancerStatus represents the status of a load-balancer. @@ -3971,10 +3982,21 @@ type LoadBalancerIngress struct { // (typically AWS load-balancers) // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` } +const ( + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 +) + // IPFamily represents the IP Family (IPv4 or IPv6). This type is used -// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). type IPFamily string const ( @@ -3982,8 +4004,29 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" - // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service - MaxServiceTopologyKeys = 16 +) + +// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +type IPFamilyPolicyType string + +const ( + // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. + // The IPFamily assigned is based on the default IPFamily used by the cluster + // or as identified by service.spec.ipFamilies field + IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when + // the cluster is configured for dual-stack. If the cluster is not configured + // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not + // set in service.spec.ipFamilies then the service will be assigned the default IPFamily + // configured on the cluster + IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using + // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The + // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. 
If + // service.spec.ipFamilies was not provided then it will be assigned according to how they are + // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative + // IPFamily will be added by apiserver + IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" ) // ServiceSpec describes the attributes that a user creates on a service. @@ -4007,30 +4050,68 @@ type ServiceSpec struct { Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. + // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. 
If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"` + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -4066,10 +4147,10 @@ type ServiceSpec struct { // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. 
Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -4083,10 +4164,14 @@ type ServiceSpec struct { ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). // +optional HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` @@ -4105,24 +4190,6 @@ type ServiceSpec struct { // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4135,8 +4202,54 @@ type ServiceSpec struct { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. 
+ // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + + // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. + // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. + // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"` + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"` } // ServicePort contains information on service's port. @@ -4179,10 +4292,14 @@ type ServicePort struct { // +optional TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. 
If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` @@ -5275,7 +5392,12 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -6104,3 +6226,26 @@ const ( // and data streams for a single forwarded connection PortForwardRequestIDHeader = "requestID" ) + +// PortStatus represents the error condition of a service port + +type PortStatus struct { + // Port is the port number of the service port of which status is recorded here + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 61832b815..c58d8ac56 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -339,7 +339,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -637,7 +637,7 @@ func (EphemeralVolumeSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", + "": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", @@ -953,6 +953,7 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } func (LoadBalancerIngress) SwaggerDoc() map[string]string { @@ -1310,7 +1311,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1602,7 +1603,7 @@ var map_PodSecurityContext = map[string]string{ "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.", "seccompProfile": "The seccomp options to use by the containers in this pod.", } @@ -1723,6 +1724,16 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { return map_PodTemplateSpec } +var map_PortStatus = map[string]string{ + "port": "Port is the port number of the service port of which status is recorded here", + "protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (PortStatus) SwaggerDoc() map[string]string { + return map_PortStatus +} + var map_PortworxVolumeSource = map[string]string{ "": "PortworxVolumeSource represents a Portworx volume resource.", "volumeID": "VolumeID uniquely identifies a Portworx volume", @@ -2209,7 +2220,7 @@ var map_ServicePort = map[string]string{ "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -2226,22 +2237,25 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { } var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. 
Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", - "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", - "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, you can specify ipFamily when creating a ClusterIP Service to determine whether the controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In either case, if you do not specify an ipFamily explicitly, it will default to the cluster's primary IP family. This field is part of an alpha feature, and you should not make any assumptions about its semantics other than those described above. 
In particular, you should not assume that it can (or cannot) be changed after creation time; that it can only have the values \"IPv4\" and \"IPv6\"; or that its current value on a given Service correctly reflects the current state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an irrelevant value anyway.)", - "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. 
This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", + "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", + "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. 
This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.", + "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. 
This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2251,6 +2265,7 @@ func (ServiceSpec) SwaggerDoc() map[string]string { var map_ServiceStatus = map[string]string{ "": "ServiceStatus represents the current status of a service.", "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", + "conditions": "Current service state", } func (ServiceStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 22aa55b91..a506f17f6 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -19,10 +19,16 @@ package v1 const ( LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" - LabelZoneRegionStable = "topology.kubernetes.io/region" + LabelFailureDomainBetaZone = "failure-domain.beta.kubernetes.io/zone" + LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region" + LabelTopologyZone = "topology.kubernetes.io/zone" + LabelTopologyRegion = "topology.kubernetes.io/region" + + // Legacy names for compat. + LabelZoneFailureDomain = LabelFailureDomainBetaZone // deprecated, remove after 1.20 + LabelZoneRegion = LabelFailureDomainBetaRegion // deprecated, remove after 1.20 + LabelZoneFailureDomainStable = LabelTopologyZone + LabelZoneRegionStable = LabelTopologyRegion LabelInstanceType = "beta.kubernetes.io/instance-type" LabelInstanceTypeStable = "node.kubernetes.io/instance-type" diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 445c7c04a..b868f9ba5 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -2144,6 +2144,13 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2163,7 +2170,9 @@ func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -4079,6 +4088,27 @@ func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortStatus) DeepCopyInto(out *PortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus. +func (in *PortStatus) DeepCopy() *PortStatus { + if in == nil { + return nil + } + out := new(PortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
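As a usage illustration of the dual-stack Service fields documented above (ipFamilyPolicy, ipFamilies, clusterIPs, allocateLoadBalancerNodePorts), the sketch below builds a Service against the vendored k8s.io/api v0.20.0 types. It is only a consumer-side example: the object name, selector, and port are invented values, and clusterIPs is left empty so the apiserver can assign addresses that match the requested families.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        // PreferDualStack requests two IP families on dual-stack clusters and
        // degrades to a single family elsewhere; ipFamilies and clusterIPs are
        // then populated by the apiserver to match.
        policy := corev1.IPFamilyPolicyPreferDualStack
        svc := corev1.Service{
            ObjectMeta: metav1.ObjectMeta{Name: "example"}, // hypothetical name
            Spec: corev1.ServiceSpec{
                Type:           corev1.ServiceTypeClusterIP,
                Selector:       map[string]string{"app": "example"}, // hypothetical selector
                IPFamilyPolicy: &policy,
                IPFamilies:     []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
                Ports: []corev1.ServicePort{
                    {Name: "http", Port: 80, Protocol: corev1.ProtocolTCP},
                },
            },
        }
        fmt.Println(svc.Name, *svc.Spec.IPFamilyPolicy, svc.Spec.IPFamilies)
    }

On a single-stack cluster the same spec yields one entry in ipFamilies and clusterIPs; on a dual-stack cluster, two, and the deepcopy additions in this patch are what keep those new slice and pointer fields safe to copy.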
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { *out = *in @@ -5270,6 +5300,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { (*out)[key] = val } } + if in.ClusterIPs != nil { + in, out := &in.ClusterIPs, &out.ClusterIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = make([]string, len(*in)) @@ -5285,16 +5320,26 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamily) - **out = **in - } if in.TopologyKeys != nil { in, out := &in.TopologyKeys, &out.TopologyKeys *out = make([]string, len(*in)) copy(*out, *in) } + if in.IPFamilies != nil { + in, out := &in.IPFamilies, &out.IPFamilies + *out = make([]IPFamily, len(*in)) + copy(*out, *in) + } + if in.IPFamilyPolicy != nil { + in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy + *out = new(IPFamilyPolicyType) + **out = **in + } + if in.AllocateLoadBalancerNodePorts != nil { + in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts + *out = new(bool) + **out = **in + } return } @@ -5312,6 +5357,13 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec { func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { *out = *in in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go index 45c4382cf..5cbee6168 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -200,54 +200,58 @@ func init() { } var fileDescriptor_772f83c5b34e07a5 = []byte{ - // 746 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, - 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca, - 0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba, - 0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0, - 0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a, - 0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10, - 0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64, - 0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24, - 0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97, - 0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88, - 0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca, - 0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c, - 0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58, - 0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23, - 0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb, - 0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 
0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13, - 0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2, - 0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68, - 0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c, - 0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e, - 0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3, - 0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88, - 0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, - 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51, - 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a, - 0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7, - 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55, - 0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00, - 0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3, - 0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a, - 0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37, - 0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2, - 0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31, - 0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b, - 0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3, - 0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35, - 0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3, - 0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79, - 0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31, - 0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f, - 0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5, - 0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b, - 0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96, - 0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, - 0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, - 0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00, + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x13, 0xd6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x00, 0x7b, 0x14, 0x84, + 0x88, 0x34, 0xd0, 0x26, 0x23, 0x40, 0x2b, 0x38, 0x8d, 0x61, 0xf9, 0x90, 0x60, 0x19, 0x7a, 0xe7, + 0x80, 0x10, 0x07, 0x7a, 0xec, 0x5a, 0xc7, 0x24, 0x76, 0x5b, 0xdd, 0x9d, 0x48, 0xb9, 0xf1, 0x0f, + 0xe0, 0x47, 0x21, 0x34, 0xc7, 0x3d, 0xee, 0xc9, 0x62, 0xbc, 0x12, 
0x3f, 0x62, 0x4f, 0xa8, 0xdb, + 0x9f, 0x43, 0x80, 0xcd, 0xcd, 0xfd, 0xaa, 0xde, 0xab, 0x7a, 0xe5, 0x2a, 0xf4, 0xf9, 0xf2, 0x81, + 0x24, 0x09, 0xf7, 0x97, 0xeb, 0x2b, 0x10, 0x19, 0x28, 0x90, 0xfe, 0x06, 0xb2, 0x88, 0x0b, 0xbf, + 0x0e, 0xb0, 0x3c, 0xf1, 0xa3, 0x44, 0x86, 0x7c, 0x03, 0x62, 0xeb, 0x6f, 0xe6, 0x6c, 0x95, 0x2f, + 0xd8, 0xdc, 0x8f, 0x21, 0x03, 0xc1, 0x14, 0x44, 0x24, 0x17, 0x5c, 0x71, 0xfc, 0x66, 0x95, 0x4e, + 0x58, 0x9e, 0x90, 0x36, 0x9d, 0x34, 0xe9, 0xc7, 0xef, 0xc5, 0x89, 0x5a, 0xac, 0xaf, 0x48, 0xc8, + 0x53, 0x3f, 0xe6, 0x31, 0xf7, 0x0d, 0xeb, 0x6a, 0xfd, 0xc4, 0xbc, 0xcc, 0xc3, 0x7c, 0x55, 0x6a, + 0xc7, 0xd3, 0x5e, 0xf1, 0x90, 0x0b, 0xf0, 0x37, 0x3b, 0x15, 0x8f, 0x3f, 0xe8, 0x72, 0x52, 0x16, + 0x2e, 0x92, 0x4c, 0xf7, 0x97, 0x2f, 0x63, 0x0d, 0x48, 0x3f, 0x05, 0xc5, 0xfe, 0x8d, 0xe5, 0xff, + 0x17, 0x4b, 0xac, 0x33, 0x95, 0xa4, 0xb0, 0x43, 0xf8, 0xe8, 0x65, 0x04, 0x19, 0x2e, 0x20, 0x65, + 0xff, 0xe4, 0x4d, 0xff, 0x1a, 0x22, 0xfb, 0x61, 0x16, 0xe5, 0x3c, 0xc9, 0x14, 0x3e, 0x45, 0x0e, + 0x8b, 0x22, 0x01, 0x52, 0x82, 0x9c, 0x58, 0x27, 0xc3, 0x99, 0x13, 0x1c, 0x95, 0x85, 0xe7, 0x9c, + 0x37, 0x20, 0xed, 0xe2, 0x18, 0x10, 0x0a, 0x79, 0x16, 0x25, 0x2a, 0xe1, 0x99, 0x9c, 0x1c, 0x9c, + 0x58, 0xb3, 0xf1, 0xd9, 0x9c, 0xfc, 0xef, 0x7c, 0x49, 0x53, 0xe9, 0xd3, 0x96, 0x18, 0xe0, 0xeb, + 0xc2, 0x1b, 0x94, 0x85, 0x87, 0x3a, 0x8c, 0xf6, 0x84, 0xf1, 0x0c, 0xd9, 0x0b, 0x2e, 0x55, 0xc6, + 0x52, 0x98, 0x0c, 0x4f, 0xac, 0x99, 0x13, 0xdc, 0x2d, 0x0b, 0xcf, 0xfe, 0xb2, 0xc6, 0x68, 0x1b, + 0xc5, 0x17, 0xc8, 0x51, 0x4c, 0xc4, 0xa0, 0x28, 0x3c, 0x99, 0x1c, 0x9a, 0x7e, 0xde, 0xea, 0xf7, + 0xa3, 0xff, 0x10, 0xd9, 0xcc, 0xc9, 0xb7, 0x57, 0x3f, 0x43, 0xa8, 0x93, 0x40, 0x40, 0x16, 0x42, + 0x65, 0xf1, 0xb2, 0x61, 0xd2, 0x4e, 0x04, 0x87, 0xc8, 0x56, 0x3c, 0xe7, 0x2b, 0x1e, 0x6f, 0x27, + 0xa3, 0x93, 0xe1, 0x6c, 0x7c, 0xf6, 0xe1, 0x9e, 0x06, 0xc9, 0x65, 0xcd, 0x7b, 0x98, 0x29, 0xb1, + 0x0d, 0xee, 0xd5, 0x26, 0xed, 0x06, 0xa6, 0xad, 0xb0, 0x36, 0x98, 0xf1, 0x08, 0x1e, 0x69, 0x83, + 0xaf, 0x74, 0x06, 0x1f, 0xd5, 0x18, 0x6d, 0xa3, 0xc7, 0x9f, 0xa0, 0xa3, 0x5b, 0xb2, 0xf8, 0x1e, + 0x1a, 0x2e, 0x61, 0x3b, 0xb1, 0x34, 0x8b, 0xea, 0x4f, 0xfc, 0x1a, 0x1a, 0x6d, 0xd8, 0x6a, 0x0d, + 0xe6, 0x7f, 0x38, 0xb4, 0x7a, 0x7c, 0x7c, 0xf0, 0xc0, 0x9a, 0xfe, 0x6a, 0x21, 0xbc, 0x3b, 0x7e, + 0xec, 0xa1, 0x91, 0x00, 0x16, 0x55, 0x22, 0x76, 0xe0, 0x94, 0x85, 0x37, 0xa2, 0x1a, 0xa0, 0x15, + 0x8e, 0xdf, 0x46, 0x77, 0x24, 0x88, 0x4d, 0x92, 0xc5, 0x46, 0xd3, 0x0e, 0xc6, 0x65, 0xe1, 0xdd, + 0x79, 0x5c, 0x41, 0xb4, 0x89, 0xe1, 0x39, 0x1a, 0x2b, 0x10, 0x69, 0x92, 0x31, 0xa5, 0x53, 0x87, + 0x26, 0xf5, 0xd5, 0xb2, 0xf0, 0xc6, 0x97, 0x1d, 0x4c, 0xfb, 0x39, 0xd3, 0x3f, 0x2c, 0x74, 0xb7, + 0xe9, 0xe8, 0x82, 0x0b, 0x85, 0xdf, 0x40, 0x87, 0xe6, 0x37, 0x1b, 0x3f, 0x81, 0x5d, 0x16, 0xde, + 0xa1, 0x99, 0x80, 0x41, 0xf1, 0x17, 0xc8, 0x36, 0x2b, 0x1b, 0xf2, 0x55, 0xe5, 0x2e, 0x38, 0xd5, + 0x73, 0xba, 0xa8, 0xb1, 0x17, 0x85, 0xf7, 0xfa, 0xee, 0x39, 0x92, 0x26, 0x4c, 0x5b, 0xb2, 0x2e, + 0x93, 0x73, 0xa1, 0x4c, 0x8f, 0xa3, 0xaa, 0x8c, 0x2e, 0x4f, 0x0d, 0xaa, 0x8d, 0xb0, 0x3c, 0x6f, + 0x68, 0x66, 0x8f, 0x9c, 0xca, 0xc8, 0x79, 0x07, 0xd3, 0x7e, 0xce, 0xf4, 0xf9, 0x01, 0x3a, 0x6a, + 0x8c, 0x3c, 0x5e, 0x25, 0x21, 0xe0, 0x9f, 0x90, 0xad, 0x2f, 0x3b, 0x62, 0x8a, 0x19, 0x37, 0xe3, + 0xb3, 0xf7, 0x7b, 0x8b, 0xd3, 0x1e, 0x28, 0xc9, 0x97, 0xb1, 0x06, 0x24, 0xd1, 0xd9, 0xdd, 0x6e, + 0x7e, 0x03, 0x8a, 0x75, 0x87, 0xd1, 0x61, 0xb4, 0x55, 0xc5, 0x9f, 0xa1, 0x71, 0x7d, 0x8a, 0x97, + 0xdb, 0x1c, 0xea, 0x36, 0xa7, 0x35, 0x65, 0x7c, 0xde, 0x85, 0x5e, 0xdc, 0x7e, 0xd2, 0x3e, 
0x0d, + 0x7f, 0x8f, 0x1c, 0xa8, 0x1b, 0xd7, 0x27, 0xac, 0x37, 0xfc, 0x9d, 0x3d, 0x37, 0x3c, 0xb8, 0x5f, + 0x17, 0x73, 0x1a, 0x44, 0xd2, 0x4e, 0x0c, 0x5f, 0xa0, 0x91, 0x1e, 0xa7, 0x9c, 0x0c, 0x8d, 0xea, + 0xe9, 0x9e, 0xaa, 0xfa, 0x47, 0x04, 0x47, 0xb5, 0xf2, 0x48, 0xbf, 0x24, 0xad, 0x84, 0xa6, 0xbf, + 0x5b, 0xe8, 0xfe, 0xad, 0x29, 0x7f, 0x9d, 0x48, 0x85, 0x7f, 0xdc, 0x99, 0x34, 0xd9, 0x6f, 0xd2, + 0x9a, 0x6d, 0xe6, 0xdc, 0xde, 0x66, 0x83, 0xf4, 0xa6, 0xfc, 0x1d, 0x1a, 0x25, 0x0a, 0xd2, 0x66, + 0x36, 0xef, 0xee, 0xe9, 0xc2, 0xb4, 0xd7, 0xd9, 0xf8, 0x4a, 0x4b, 0xd0, 0x4a, 0x29, 0x20, 0xd7, + 0x37, 0xee, 0xe0, 0xe9, 0x8d, 0x3b, 0x78, 0x76, 0xe3, 0x0e, 0x7e, 0x29, 0x5d, 0xeb, 0xba, 0x74, + 0xad, 0xa7, 0xa5, 0x6b, 0x3d, 0x2b, 0x5d, 0xeb, 0xcf, 0xd2, 0xb5, 0x7e, 0x7b, 0xee, 0x0e, 0x7e, + 0xb0, 0x1b, 0xcd, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x03, 0x95, 0x92, 0xa5, 0xfa, 0x06, 0x00, + 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +274,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +366,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +602,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +618,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +719,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +730,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1042,6 +1086,39 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
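The byte constants written by the generated marshal code above are plain protobuf field keys, computed as (field number << 3) | wire type. The following is a standalone sketch, not part of the generated code, that recomputes the keys for the fields added in this patch:

    package main

    import "fmt"

    // key returns the protobuf field key byte: field number shifted left by
    // three bits, OR'd with the wire type (0 = varint, 2 = length-delimited).
    func key(fieldNumber, wireType int) int {
        return fieldNumber<<3 | wireType
    }

    func main() {
        fmt.Printf("serving (field 2, bool)     -> 0x%02x\n", key(2, 0)) // 0x10
        fmt.Printf("terminating (field 3, bool) -> 0x%02x\n", key(3, 0)) // 0x18
        fmt.Printf("nodeName (field 6, string)  -> 0x%02x\n", key(6, 2)) // 0x32
    }

These are the same 0x10, 0x18, and 0x32 literals that MarshalToSizedBuffer writes and that the Unmarshal cases for fields 2, 3, and 6 expect.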
return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1116,6 +1193,48 @@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto index 2cbbdcdb0..4b66a6c57 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1alpha1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. 
This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go index cf50b501c..34b706ea8 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -86,8 +86,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -106,8 +106,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -115,9 +121,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. 
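To make the intended interplay of ready, serving, and terminating concrete, here is a consumer-side sketch against the vendored discovery/v1alpha1 types. It only encodes the defaulting rules stated in the comments above (nil ready reads as ready, nil serving defers to ready, nil terminating reads as not terminating); the helper names are invented for illustration.

    package main

    import (
        "fmt"

        discovery "k8s.io/api/discovery/v1alpha1"
    )

    // isServing: a nil Serving defers to Ready, and a nil Ready is treated as ready.
    func isServing(c discovery.EndpointConditions) bool {
        if c.Serving != nil {
            return *c.Serving
        }
        if c.Ready != nil {
            return *c.Ready
        }
        return true
    }

    // isTerminating: a nil Terminating means the endpoint is not terminating.
    func isTerminating(c discovery.EndpointConditions) bool {
        return c.Terminating != nil && *c.Terminating
    }

    func main() {
        t, f := true, false
        // A ready endpoint that has begun terminating: ready must be false for
        // compatibility, while serving stays true.
        c := discovery.EndpointConditions{Ready: &f, Serving: &t, Terminating: &t}
        fmt.Println(isServing(c), isTerminating(c)) // true true
    }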
+ // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go index 1ba2d60d4..f6c983689 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. 
This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go index c72f64acf..13e54d500 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index ce0046c51..6caab402c 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -200,54 +200,57 @@ func init() { } var fileDescriptor_ece80bbc872d519b = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x48, - 0x14, 0xb6, 0xe2, 0x88, 0x95, 0xc6, 0x31, 0x9b, 0x0c, 0x7b, 0x30, 0xde, 0x20, 0x19, 0x2f, 0x2c, - 0x66, 0x43, 0xa4, 0x75, 0xc8, 0x2e, 0x61, 0xf7, 0x14, 0xed, 0x86, 0xb6, 0xd0, 0x36, 0x66, 0x1a, - 0x28, 0x94, 0x1e, 0x3a, 0x96, 0x26, 0xb2, 0x6a, 0x5b, 0x23, 0x34, 0x63, 0x83, 0x6f, 0xfd, 0x13, - 0xfa, 0xf7, 0xf4, 0x5a, 0x28, 0x39, 0xe6, 0x98, 0x93, 0xa8, 0xd5, 0xff, 0x22, 0xa7, 0x32, 0xa3, - 0x5f, 0x76, 0xdd, 0x1f, 0xbe, 0xcd, 0x7c, 0xf3, 0xbe, 0xef, 0xbd, 0xf7, 0xcd, 0x7b, 0xe0, 0x62, - 0x7c, 0xc6, 0xac, 0x80, 0xda, 0xe3, 
0xd9, 0x90, 0xc4, 0x21, 0xe1, 0x84, 0xd9, 0x73, 0x12, 0x7a, - 0x34, 0xb6, 0xf3, 0x07, 0x1c, 0x05, 0xb6, 0x17, 0x30, 0x97, 0xce, 0x49, 0xbc, 0xb0, 0xe7, 0xfd, - 0x21, 0xe1, 0xb8, 0x6f, 0xfb, 0x24, 0x24, 0x31, 0xe6, 0xc4, 0xb3, 0xa2, 0x98, 0x72, 0x0a, 0x0f, - 0xb3, 0x68, 0x0b, 0x47, 0x81, 0x55, 0x46, 0x5b, 0x79, 0x74, 0xfb, 0xd8, 0x0f, 0xf8, 0x68, 0x36, - 0xb4, 0x5c, 0x3a, 0xb5, 0x7d, 0xea, 0x53, 0x5b, 0x92, 0x86, 0xb3, 0x6b, 0x79, 0x93, 0x17, 0x79, - 0xca, 0xc4, 0xda, 0xdd, 0x95, 0xd4, 0x2e, 0x8d, 0x89, 0x3d, 0xdf, 0x48, 0xd8, 0x3e, 0xad, 0x62, - 0xa6, 0xd8, 0x1d, 0x05, 0xa1, 0xa8, 0x2e, 0x1a, 0xfb, 0x02, 0x60, 0xf6, 0x94, 0x70, 0xfc, 0x35, - 0x96, 0xfd, 0x2d, 0x56, 0x3c, 0x0b, 0x79, 0x30, 0x25, 0x1b, 0x84, 0xbf, 0x7f, 0x44, 0x60, 0xee, - 0x88, 0x4c, 0xf1, 0x97, 0xbc, 0xee, 0xbb, 0x3a, 0xd0, 0x2e, 0x42, 0x2f, 0xa2, 0x41, 0xc8, 0xe1, - 0x11, 0xd0, 0xb1, 0xe7, 0xc5, 0x84, 0x31, 0xc2, 0x5a, 0x4a, 0xa7, 0xde, 0xd3, 0x9d, 0x66, 0x9a, - 0x98, 0xfa, 0x79, 0x01, 0xa2, 0xea, 0x1d, 0x7a, 0x00, 0xb8, 0x34, 0xf4, 0x02, 0x1e, 0xd0, 0x90, - 0xb5, 0x76, 0x3a, 0x4a, 0xaf, 0x71, 0xf2, 0xa7, 0xf5, 0x3d, 0x7b, 0xad, 0x22, 0xd1, 0x7f, 0x25, - 0xcf, 0x81, 0x37, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5a, 0xd1, 0x85, 0x3d, 0xa0, 0x8d, - 0x28, 0xe3, 0x21, 0x9e, 0x92, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x67, 0x2f, 0x4d, 0x4c, 0xed, 0x61, - 0x8e, 0xa1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x71, 0xec, 0x13, 0x8e, 0xc8, 0x75, 0x6b, 0x57, 0x96, - 0xf3, 0xdb, 0x6a, 0x39, 0xe2, 0x83, 0xac, 0x79, 0xdf, 0xba, 0x1c, 0xbe, 0x26, 0xae, 0x08, 0x22, - 0x31, 0x09, 0x5d, 0x92, 0x75, 0x78, 0x55, 0x30, 0x51, 0x25, 0x02, 0x87, 0x40, 0xe3, 0x34, 0xa2, - 0x13, 0xea, 0x2f, 0x5a, 0x6a, 0xa7, 0xde, 0x6b, 0x9c, 0x9c, 0x6e, 0xd7, 0x9f, 0x75, 0x95, 0xd3, - 0x2e, 0x42, 0x1e, 0x2f, 0x9c, 0xfd, 0xbc, 0x47, 0xad, 0x80, 0x51, 0xa9, 0xdb, 0xfe, 0x17, 0x34, - 0xd7, 0x82, 0xe1, 0x3e, 0xa8, 0x8f, 0xc9, 0xa2, 0xa5, 0x88, 0x5e, 0x91, 0x38, 0xc2, 0x5f, 0x80, - 0x3a, 0xc7, 0x93, 0x19, 0x91, 0x1e, 0xeb, 0x28, 0xbb, 0xfc, 0xb3, 0x73, 0xa6, 0x74, 0xff, 0x02, - 0x70, 0xd3, 0x52, 0x68, 0x02, 0x35, 0x26, 0xd8, 0xcb, 0x34, 0x34, 0x47, 0x4f, 0x13, 0x53, 0x45, - 0x02, 0x40, 0x19, 0xde, 0xfd, 0xa0, 0x80, 0xbd, 0x82, 0x37, 0xa0, 0x31, 0x87, 0x87, 0x60, 0x57, - 0x1a, 0x2c, 0x93, 0x3a, 0x5a, 0x9a, 0x98, 0xbb, 0x4f, 0x85, 0xb9, 0x12, 0x85, 0x0f, 0x80, 0x26, - 0x67, 0xc5, 0xa5, 0x93, 0xac, 0x04, 0xe7, 0x48, 0x34, 0x33, 0xc8, 0xb1, 0xfb, 0xc4, 0xfc, 0x75, - 0x73, 0x0f, 0xac, 0xe2, 0x19, 0x95, 0x64, 0x91, 0x26, 0xa2, 0x31, 0x97, 0xff, 0xa8, 0x66, 0x69, - 0x44, 0x7a, 0x24, 0x51, 0xd8, 0x07, 0x0d, 0x1c, 0x45, 0x05, 0x4d, 0xfe, 0xa0, 0xee, 0xfc, 0x9c, - 0x26, 0x66, 0xe3, 0xbc, 0x82, 0xd1, 0x6a, 0x4c, 0x77, 0xb9, 0x03, 0x9a, 0x45, 0x23, 0xcf, 0x26, - 0x81, 0x4b, 0xe0, 0x2b, 0xa0, 0x89, 0x95, 0xf2, 0x30, 0xc7, 0xb2, 0x9b, 0xf5, 0x91, 0x2c, 0x37, - 0xc3, 0x8a, 0xc6, 0xbe, 0x00, 0x98, 0x25, 0xa2, 0xab, 0xa9, 0x78, 0x42, 0x38, 0xae, 0x46, 0xb2, - 0xc2, 0x50, 0xa9, 0x0a, 0xff, 0x07, 0x8d, 0x7c, 0x07, 0xae, 0x16, 0x11, 0xc9, 0xcb, 0xec, 0xe6, - 0x94, 0xc6, 0x79, 0xf5, 0x74, 0xbf, 0x7e, 0x45, 0xab, 0x34, 0xf8, 0x1c, 0xe8, 0x24, 0x2f, 0x5c, - 0xec, 0x8e, 0x98, 0xad, 0xdf, 0xb7, 0x9b, 0x2d, 0xe7, 0x20, 0xcf, 0xa5, 0x17, 0x08, 0x43, 0x95, - 0x16, 0xbc, 0x04, 0xaa, 0x70, 0x93, 0xb5, 0xea, 0x52, 0xf4, 0x8f, 0xed, 0x44, 0xc5, 0x37, 0x38, - 0xcd, 0x5c, 0x58, 0x15, 0x37, 0x86, 0x32, 0x9d, 0xee, 0x7b, 0x05, 0x1c, 0xac, 0x79, 0xfc, 0x38, - 0x60, 0x1c, 0xbe, 0xdc, 0xf0, 0xd9, 0xda, 0xce, 0x67, 0xc1, 0x96, 0x2e, 0x97, 0x4b, 0x51, 0x20, - 0x2b, 0x1e, 0x0f, 0x80, 0x1a, 0x70, 0x32, 0x2d, 0x9c, 0x39, 
0xda, 0xae, 0x09, 0x59, 0x5d, 0xd5, - 0xc5, 0x23, 0xa1, 0x80, 0x32, 0x21, 0xe7, 0xf8, 0x66, 0x69, 0xd4, 0x6e, 0x97, 0x46, 0xed, 0x6e, - 0x69, 0xd4, 0xde, 0xa4, 0x86, 0x72, 0x93, 0x1a, 0xca, 0x6d, 0x6a, 0x28, 0x77, 0xa9, 0xa1, 0x7c, - 0x4c, 0x0d, 0xe5, 0xed, 0x27, 0xa3, 0xf6, 0xe2, 0xa7, 0x5c, 0xf2, 0x73, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x29, 0x1a, 0xa2, 0x6f, 0x6d, 0x06, 0x00, 0x00, + // 798 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x63, 0xc6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x58, 0xd9, 0xa3, 0x20, + 0x50, 0xc4, 0x68, 0x6d, 0x66, 0xb5, 0x42, 0x2b, 0x38, 0x8d, 0x61, 0x04, 0x48, 0xb0, 0x1b, 0xf5, + 0x46, 0x42, 0x42, 0x1c, 0xe8, 0xd8, 0xb5, 0x8e, 0x49, 0xec, 0xb6, 0xba, 0x3b, 0x91, 0x72, 0xe3, + 0x1f, 0xc0, 0x7f, 0x42, 0x42, 0x73, 0xdc, 0xe3, 0x9e, 0x2c, 0x62, 0xf8, 0x15, 0x7b, 0x42, 0xdd, + 0xfe, 0x4a, 0x08, 0x1f, 0xb9, 0x75, 0xbf, 0xaa, 0xf7, 0xaa, 0x5e, 0x75, 0x17, 0xba, 0x5d, 0x3c, + 0x15, 0x5e, 0xc2, 0xfc, 0xc5, 0x6a, 0x06, 0x3c, 0x03, 0x09, 0xc2, 0x5f, 0x43, 0x16, 0x31, 0xee, + 0xd7, 0x01, 0x9a, 0x27, 0x7e, 0x94, 0x88, 0x90, 0xad, 0x81, 0x6f, 0xfc, 0xf5, 0xf5, 0x0c, 0x24, + 0xbd, 0xf6, 0x63, 0xc8, 0x80, 0x53, 0x09, 0x91, 0x97, 0x73, 0x26, 0x19, 0x7e, 0x58, 0x65, 0x7b, + 0x34, 0x4f, 0xbc, 0x36, 0xdb, 0xab, 0xb3, 0x2f, 0x1e, 0xc5, 0x89, 0x9c, 0xaf, 0x66, 0x5e, 0xc8, + 0x52, 0x3f, 0x66, 0x31, 0xf3, 0x35, 0x69, 0xb6, 0x7a, 0xa9, 0x6f, 0xfa, 0xa2, 0x4f, 0x95, 0xd8, + 0xc5, 0x68, 0xa7, 0x74, 0xc8, 0x38, 0xf8, 0xeb, 0x83, 0x82, 0x17, 0x4f, 0xba, 0x9c, 0x94, 0x86, + 0xf3, 0x24, 0x53, 0xdd, 0xe5, 0x8b, 0x58, 0x01, 0xc2, 0x4f, 0x41, 0xd2, 0x7f, 0x62, 0xf9, 0xff, + 0xc6, 0xe2, 0xab, 0x4c, 0x26, 0x29, 0x1c, 0x10, 0x3e, 0xfe, 0x3f, 0x82, 0x08, 0xe7, 0x90, 0xd2, + 0xbf, 0xf3, 0x46, 0x7f, 0xf6, 0x91, 0x75, 0x9b, 0x45, 0x39, 0x4b, 0x32, 0x89, 0xaf, 0x90, 0x4d, + 0xa3, 0x88, 0x83, 0x10, 0x20, 0x86, 0xc6, 0x65, 0x7f, 0x6c, 0x07, 0xe7, 0x65, 0xe1, 0xda, 0x37, + 0x0d, 0x48, 0xba, 0x38, 0x8e, 0x10, 0x0a, 0x59, 0x16, 0x25, 0x32, 0x61, 0x99, 0x18, 0x9e, 0x5c, + 0x1a, 0xe3, 0xc1, 0xe3, 0x8f, 0xbc, 0xff, 0x1a, 0xaf, 0xd7, 0x14, 0xfa, 0xac, 0xe5, 0x05, 0xf8, + 0xae, 0x70, 0x7b, 0x65, 0xe1, 0xa2, 0x0e, 0x23, 0x3b, 0xba, 0x78, 0x8c, 0xac, 0x39, 0x13, 0x32, + 0xa3, 0x29, 0x0c, 0xfb, 0x97, 0xc6, 0xd8, 0x0e, 0xee, 0x95, 0x85, 0x6b, 0x7d, 0x59, 0x63, 0xa4, + 0x8d, 0xe2, 0x09, 0xb2, 0x25, 0xe5, 0x31, 0x48, 0x02, 0x2f, 0x87, 0xa7, 0xba, 0x9d, 0xf7, 0x76, + 0xdb, 0x51, 0x0f, 0xe4, 0xad, 0xaf, 0xbd, 0xe7, 0xb3, 0x1f, 0x21, 0x54, 0x49, 0xc0, 0x21, 0x0b, + 0xa1, 0x72, 0x38, 0x6d, 0x98, 0xa4, 0x13, 0xc1, 0x33, 0x64, 0x49, 0x96, 0xb3, 0x25, 0x8b, 0x37, + 0x43, 0xf3, 0xb2, 0x3f, 0x1e, 0x3c, 0x7e, 0x72, 0x9c, 0x3f, 0x6f, 0x5a, 0xd3, 0x6e, 0x33, 0xc9, + 0x37, 0xc1, 0xfd, 0xda, 0xa3, 0xd5, 0xc0, 0xa4, 0xd5, 0x55, 0xfe, 0x32, 0x16, 0xc1, 0x33, 0xe5, + 0xef, 0xad, 0xce, 0xdf, 0xb3, 0x1a, 0x23, 0x6d, 0xf4, 0xe2, 0x53, 0x74, 0xbe, 0x27, 0x8b, 0xef, + 0xa3, 0xfe, 0x02, 0x36, 0x43, 0x43, 0xb1, 0x88, 0x3a, 0xe2, 0x77, 0x90, 0xb9, 0xa6, 0xcb, 0x15, + 0xe8, 0xd7, 0xb0, 0x49, 0x75, 0xf9, 0xe4, 0xe4, 0xa9, 0x31, 0xfa, 0xd9, 0x40, 0xf8, 0x70, 0xfa, + 0xd8, 0x45, 0x26, 0x07, 0x1a, 0x55, 0x22, 0x56, 0x60, 0x97, 0x85, 0x6b, 0x12, 0x05, 0x90, 0x0a, + 0xc7, 0xef, 0xa3, 0x33, 0x01, 0x7c, 0x9d, 0x64, 0xb1, 0xd6, 0xb4, 0x82, 0x41, 0x59, 0xb8, 0x67, + 0x2f, 0x2a, 0x88, 0x34, 0x31, 0x7c, 0x8d, 0x06, 0x12, 0x78, 0x9a, 0x64, 0x54, 0xaa, 0xd4, 0xbe, + 0x4e, 0x7d, 0xbb, 0x2c, 0xdc, 0xc1, 0xb4, 0x83, 0xc9, 0x6e, 0xce, 0xe8, 0x37, 
0x03, 0xdd, 0x6b, + 0x3a, 0x9a, 0x30, 0x2e, 0xf1, 0x43, 0x74, 0xaa, 0x5f, 0x59, 0xfb, 0x09, 0xac, 0xb2, 0x70, 0x4f, + 0xf5, 0x04, 0x34, 0x8a, 0xbf, 0x40, 0x96, 0xfe, 0xb0, 0x21, 0x5b, 0x56, 0xee, 0x82, 0x2b, 0x35, + 0xa7, 0x49, 0x8d, 0xbd, 0x29, 0xdc, 0x77, 0x0f, 0x97, 0xd1, 0x6b, 0xc2, 0xa4, 0x25, 0xab, 0x32, + 0x39, 0xe3, 0x52, 0xf7, 0x68, 0x56, 0x65, 0x54, 0x79, 0xa2, 0x51, 0x65, 0x84, 0xe6, 0x79, 0x43, + 0xd3, 0xdf, 0xc8, 0xae, 0x8c, 0xdc, 0x74, 0x30, 0xd9, 0xcd, 0x19, 0x6d, 0x4f, 0xd0, 0x79, 0x63, + 0xe4, 0xc5, 0x32, 0x09, 0x01, 0xff, 0x80, 0x2c, 0xb5, 0xd7, 0x11, 0x95, 0x54, 0xbb, 0xd9, 0xdf, + 0x8b, 0x76, 0x3d, 0xbd, 0x7c, 0x11, 0x2b, 0x40, 0x78, 0x2a, 0xbb, 0xfb, 0x9a, 0xdf, 0x80, 0xa4, + 0xdd, 0x5e, 0x74, 0x18, 0x69, 0x55, 0xf1, 0xe7, 0x68, 0x50, 0x2f, 0xe2, 0x74, 0x93, 0x43, 0xdd, + 0xe6, 0xa8, 0xa6, 0x0c, 0x6e, 0xba, 0xd0, 0x9b, 0xfd, 0x2b, 0xd9, 0xa5, 0xe1, 0x6f, 0x91, 0x0d, + 0x75, 0xe3, 0x6a, 0x81, 0xd5, 0x07, 0xff, 0xe0, 0xb8, 0x0f, 0x1e, 0x3c, 0xa8, 0x6b, 0xd9, 0x0d, + 0x22, 0x48, 0xa7, 0x85, 0x9f, 0x23, 0x53, 0x4d, 0x53, 0x0c, 0xfb, 0x5a, 0xf4, 0xc3, 0xe3, 0x44, + 0xd5, 0x33, 0x04, 0xe7, 0xb5, 0xb0, 0xa9, 0x6e, 0x82, 0x54, 0x3a, 0xa3, 0x5f, 0x0d, 0xf4, 0x60, + 0x6f, 0xc6, 0x5f, 0x27, 0x42, 0xe2, 0xef, 0x0f, 0xe6, 0xec, 0x1d, 0x37, 0x67, 0xc5, 0xd6, 0x53, + 0x6e, 0x37, 0xb3, 0x41, 0x76, 0x66, 0x3c, 0x41, 0x66, 0x22, 0x21, 0x6d, 0x26, 0x73, 0x75, 0x9c, + 0x09, 0xdd, 0x5d, 0xe7, 0xe2, 0x2b, 0xa5, 0x40, 0x2a, 0xa1, 0xe0, 0xd1, 0xdd, 0xd6, 0xe9, 0xbd, + 0xda, 0x3a, 0xbd, 0xd7, 0x5b, 0xa7, 0xf7, 0x53, 0xe9, 0x18, 0x77, 0xa5, 0x63, 0xbc, 0x2a, 0x1d, + 0xe3, 0x75, 0xe9, 0x18, 0xbf, 0x97, 0x8e, 0xf1, 0xcb, 0x1f, 0x4e, 0xef, 0xbb, 0xb3, 0x5a, 0xf2, + 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x35, 0xe6, 0xf5, 0xf2, 0x06, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +273,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +365,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +601,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +617,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +718,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +729,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) 
+ `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1042,6 +1085,39 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1116,6 +1192,48 @@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index adf10c0e4..e5d21caad 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1beta1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. 
This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 5cafea747..e14088e8b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -60,12 +60,6 @@ type EndpointSlice struct { type AddressType string const ( - // AddressTypeIP represents an IP Address. - // This address type has been deprecated and has been replaced by the IPv4 - // and IPv6 adddress types. New resources with this address type will be - // considered invalid. This will be fully removed in 1.18. - // +deprecated - AddressTypeIP = AddressType("IP") // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) // AddressTypeIPv6 represents an IPv6 Address. @@ -88,8 +82,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -108,8 +102,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. 
+ // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -117,9 +117,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index d67cc7214..d48b93d8b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index 8490ec73f..7076553d2 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index 18e3d0182..690c99e4c 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. @@ -43,22 +47,18 @@ message Event { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. - // +optional optional string reportingController = 4; // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional optional string reportingInstance = 5; // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string action = 6; // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string reason = 7; // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -80,7 +80,7 @@ message Event { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. 
optional string type = 11; // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index 07ede5542..4bf715872 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -25,10 +25,15 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` @@ -39,22 +44,18 @@ type Event struct { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. - // +optional ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -76,7 +77,7 @@ type Event struct { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index e0467436e..7255727bb 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -28,17 +28,17 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. 
It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.", "reportingInstance": "reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.", - "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters.", - "reason": "reason is why the action was taken. It is human-readable. This field can have at most 128 characters.", + "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "reason": "reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.", "regarding": "regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", "related": "related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", "note": "note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", - "type": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable.", + "type": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.", "deprecatedSource": "deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedFirstTimestamp": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedLastTimestamp": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 79bde87c4..90b57d8d0 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1beta1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1beta1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index e2ed214b0..796e56ea7 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -27,10 +27,15 @@ import ( // +k8s:prerelease-lifecycle-gen:deprecated=1.22 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 8c987f899..7f8e162cd 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. 
This field cannot be empty for new Events.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index a81cce680..a4ca5b563 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.extensions.v1beta1; diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index a1ef1a10b..bd75c51bc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -52,7 +52,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.2 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // represents a scaling request for a resource. type Scale struct { @@ -76,7 +76,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for @@ -153,7 +153,7 @@ type DeploymentSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. @@ -313,7 +313,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -491,7 +491,7 @@ type DaemonSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for @@ -534,7 +534,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList // DaemonSetList is a collection of daemon sets. 
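
[Editorial aside, not part of the patch: the hunks above and below lower the generated APILifecycleRemoved values for the extensions/v1beta1 types from 1.18 to 1.16, reflecting that these APIs stopped being served in Kubernetes 1.16. As a minimal sketch only, a consumer of the vendored types could use the generated lifecycle methods to decide whether a cluster still serves the API. The isServed helper and the hard-coded cluster version are assumptions for illustration, not anything this patch introduces.]

    package main

    import (
    	"fmt"

    	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    )

    // isServed reports whether an API removed in (removedMajor, removedMinor)
    // is still served by a cluster at (clusterMajor, clusterMinor).
    // Helper is assumed for this sketch; it is not part of the vendored code.
    func isServed(clusterMajor, clusterMinor, removedMajor, removedMinor int) bool {
    	if clusterMajor != removedMajor {
    		return clusterMajor < removedMajor
    	}
    	return clusterMinor < removedMinor
    }

    func main() {
    	var ds extensionsv1beta1.DaemonSet
    	// Generated method from zz_generated.prerelease-lifecycle.go;
    	// returns 1, 16 after this patch.
    	removedMajor, removedMinor := ds.APILifecycleRemoved()

    	clusterMajor, clusterMinor := 1, 20 // assumed cluster version for the example
    	if !isServed(clusterMajor, clusterMinor, removedMajor, removedMinor) {
    		fmt.Printf("extensions/v1beta1 DaemonSet is no longer served as of %d.%d\n",
    			removedMajor, removedMinor)
    	}
    }

[End of editorial aside; the patch continues unchanged.]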
@@ -808,7 +808,7 @@ type IngressBackend struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -840,7 +840,7 @@ type ReplicaSet struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList // ReplicaSetList is a collection of ReplicaSets. @@ -946,7 +946,7 @@ type ReplicaSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy // PodSecurityPolicy governs the ability to make requests that affect the Security Context @@ -1308,7 +1308,7 @@ const AllowAllRuntimeClassNames = "*" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1328,7 +1328,7 @@ type PodSecurityPolicyList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicy // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1502,7 +1502,7 @@ type NetworkPolicyPeer struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicyList // DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 8630905bf..5023dd31a 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -69,7 +69,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -135,7 +135,7 @@ func (in *DeploymentRollback) APILifecycleDeprecated() (major, minor int) { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -207,7 +207,7 @@ func (in *NetworkPolicy) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *NetworkPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -231,7 +231,7 @@ func (in *NetworkPolicyList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -255,7 +255,7 @@ func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -279,7 +279,7 @@ func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKi // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -303,7 +303,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -327,7 +327,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -345,5 +345,5 @@ func (in *Scale) APILifecycleDeprecated() (major, minor int) { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go index 31b8b5d53..a3d4d0c60 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 0801dd6c1..7b19a273e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.flowcontrol.v1alpha1; diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index a67c6dd02..1e9701fc3 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -51,9 +51,19 @@ const ( FlowSchemaMaxMatchingPrecedence int32 = 10000 ) +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -74,6 +84,10 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. 
type FlowSchemaList struct { @@ -321,6 +335,10 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -340,6 +358,10 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..4152aa2a9 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,121 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchemaList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go new file mode 100644 index 000000000..50897b7eb --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go new file mode 100644 index 000000000..5a0c75560 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -0,0 +1,5433 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + 
xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func 
(*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_80171c2a4e3669de) +} + +var fileDescriptor_80171c2a4e3669de = []byte{ + // 1494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, + 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, + 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, + 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, + 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, + 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, + 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 
0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, + 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, + 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, + 0x84, 0x19, 0xe7, 0xca, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, + 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, + 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, + 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0xd5, 0x97, 0xc4, 0x6a, 0x79, 0x98, + 0x97, 0xe7, 0xdb, 0xcc, 0x6a, 0x92, 0x3e, 0x87, 0xb7, 0xd2, 0x1c, 0xa8, 0x59, 0x27, 0x4d, 0xa3, + 0xd7, 0x4f, 0xbd, 0x03, 0x2b, 0x1f, 0x37, 0x9c, 0x83, 0x2b, 0x16, 0x65, 0x96, 0x5d, 0xf3, 0x2d, + 0x5a, 0x27, 0xde, 0x16, 0x61, 0x75, 0xa7, 0x8a, 0x3e, 0x80, 0x2c, 0x3b, 0x74, 0x49, 0x41, 0x59, + 0x53, 0x5e, 0xcb, 0xeb, 0xa7, 0x1e, 0xb5, 0x4b, 0x13, 0x9d, 0x76, 0x29, 0x7b, 0xeb, 0xd0, 0x25, + 0xcf, 0xda, 0xa5, 0xe3, 0x43, 0xdc, 0xb8, 0x1a, 0x0b, 0x47, 0xf5, 0xfb, 0x0c, 0x00, 0xb7, 0xda, + 0x15, 0xa1, 0xd1, 0x3d, 0x98, 0xe1, 0xe5, 0x56, 0x0d, 0x66, 0x08, 0xcc, 0xd9, 0xf3, 0x67, 0xb5, + 0xb8, 0xd7, 0x51, 0xd6, 0x9a, 0xbb, 0x5f, 0xe3, 0x02, 0xaa, 0x71, 0x6b, 0xad, 0x75, 0x4e, 0xbb, + 0x51, 0xb9, 0x4f, 0x4c, 0xb6, 0x45, 0x98, 0xa1, 0x23, 0x99, 0x05, 0xc4, 0x32, 0x1c, 0xa1, 0xa2, + 0x1d, 0xc8, 0x52, 0x97, 0x98, 0x85, 0x8c, 0x40, 0xd7, 0xb4, 0xd1, 0x27, 0xa9, 0xc5, 0xb9, 0xed, + 0xba, 0xc4, 0xd4, 0xe7, 0xc2, 0x0a, 0xf9, 0x17, 0x16, 0x48, 0xe8, 0x0b, 0x98, 0xa2, 0xcc, 0x60, + 0x3e, 0x2d, 0x4c, 0xf6, 0x65, 0x9c, 0x86, 0x29, 0xfc, 0xf4, 0x05, 0x89, 0x3a, 0x15, 0x7c, 0x63, + 0x89, 0xa7, 0x3e, 0xc9, 0xc0, 0x72, 0x6c, 0xbc, 0xe1, 0xd8, 0x55, 0x8b, 0x59, 0x8e, 0x8d, 0xde, + 0x4d, 0x74, 0xfd, 0x64, 0x4f, 0xd7, 0x57, 0x06, 0xb8, 0xc4, 0x1d, 0x47, 0x6f, 0x47, 0xe9, 0x66, + 0x84, 0xfb, 0x89, 0x64, 0xf0, 0x67, 0xed, 0xd2, 0x62, 0xe4, 0x96, 0xcc, 0x07, 0xb5, 0x00, 0x35, + 0x0c, 0xca, 0x6e, 0x79, 0x86, 0x4d, 0x03, 0x58, 0xab, 0x49, 0x64, 0xd5, 0x6f, 0x8c, 0x77, 0x4e, + 0xdc, 0x43, 0x5f, 0x95, 0x21, 0xd1, 0x66, 0x1f, 0x1a, 0x1e, 0x10, 0x01, 0xbd, 0x0a, 0x53, 0x1e, + 0x31, 0xa8, 0x63, 0x17, 0xb2, 0x22, 0xe5, 0xa8, 0x5f, 0x58, 0x48, 0xb1, 0xd4, 0xa2, 0xd7, 0x61, + 0xba, 0x49, 0x28, 0x35, 0x6a, 0xa4, 0x90, 0x13, 0x86, 0x8b, 0xd2, 0x70, 0x7a, 0x2b, 0x10, 0xe3, + 0x50, 0xaf, 0xfe, 0xa2, 0xc0, 0x42, 0xdc, 0xa7, 0x4d, 0x8b, 0x32, 0x74, 0xb7, 0x8f, 0x7b, 0xda, + 0x78, 0x35, 0x71, 0x6f, 0xc1, 0xbc, 0x25, 0x19, 0x6e, 0x26, 0x94, 0x74, 0xf1, 0xee, 0x06, 0xe4, + 0x2c, 0x46, 0x9a, 0xbc, 0xeb, 0x93, 0x3d, 0xed, 0x4a, 0x21, 0x89, 0x3e, 0x2f, 0x61, 0x73, 0xd7, + 0x38, 0x00, 0x0e, 0x70, 0xd4, 0xbf, 0x26, 0xbb, 0x2b, 0xe0, 0x7c, 0x44, 0x3f, 0x29, 0xb0, 0xea, + 0x7a, 0x96, 0xe3, 0x59, 0xec, 0x70, 0x93, 0xb4, 0x48, 0x63, 0xc3, 0xb1, 0xf7, 0xac, 0x9a, 0xef, + 0x19, 0xbc, 0x95, 0xb2, 0xa8, 0x8d, 0xb4, 0xc8, 0x3b, 0x43, 0x11, 0x30, 0xd9, 0x23, 0x1e, 0xb1, + 0x4d, 0xa2, 0xab, 0x32, 0xa5, 0xd5, 0x11, 0xc6, 0x23, 0x52, 0x41, 0x9f, 0x02, 0x6a, 0x1a, 0x8c, + 0x77, 0xb4, 0xb6, 0xe3, 0x11, 0x93, 0x54, 0x39, 0xaa, 0x20, 0x64, 0x2e, 0x66, 0xc7, 0x56, 0x9f, + 0x05, 0x1e, 0xe0, 0x85, 0xbe, 0x55, 0x60, 0xb9, 0xda, 0x3f, 0x64, 0x24, 0x2f, 0x2f, 0x8d, 0xd3, + 0xe8, 0x01, 0x33, 0x4a, 0x5f, 0xe9, 0xb4, 0x4b, 0xcb, 0x03, 0x14, 0x78, 0x50, 0x30, 0x74, 0x17, + 0x72, 0x9e, 0xdf, 0x20, 0xb4, 0x90, 0x15, 0xc7, 0x9b, 0x1a, 0x75, 0xc7, 0x69, 
0x58, 0xe6, 0x21, + 0xe6, 0x2e, 0x9f, 0x5b, 0xac, 0xbe, 0xeb, 0x8b, 0x59, 0x45, 0xe3, 0xb3, 0x16, 0x2a, 0x1c, 0x80, + 0xaa, 0x0f, 0x61, 0xa9, 0x77, 0x68, 0xa0, 0x1a, 0x80, 0x19, 0xde, 0x53, 0x5a, 0x50, 0x44, 0xd8, + 0x37, 0xc7, 0x67, 0x55, 0x74, 0xc7, 0xe3, 0x79, 0x19, 0x89, 0x28, 0xee, 0x82, 0x56, 0xcf, 0xc2, + 0xdc, 0x55, 0xcf, 0xf1, 0x5d, 0x99, 0x23, 0x5a, 0x83, 0xac, 0x6d, 0x34, 0xc3, 0xe9, 0x13, 0x4d, + 0xc4, 0x6d, 0xa3, 0x49, 0xb0, 0xd0, 0xa8, 0x3f, 0x2a, 0x30, 0xbf, 0x69, 0x35, 0x2d, 0x86, 0x09, + 0x75, 0x1d, 0x9b, 0x12, 0x74, 0x31, 0x31, 0xb1, 0x4e, 0xf4, 0x4c, 0xac, 0x23, 0x09, 0xe3, 0xae, + 0x59, 0xf5, 0x25, 0x4c, 0x3f, 0xf0, 0x89, 0x6f, 0xd9, 0x35, 0x39, 0xaf, 0x2f, 0xa4, 0x15, 0x78, + 0x33, 0x30, 0x4f, 0xb0, 0x4d, 0x9f, 0xe5, 0x23, 0x40, 0x6a, 0x70, 0x88, 0xa8, 0xfe, 0xad, 0xc0, + 0x09, 0x11, 0x98, 0x54, 0x87, 0xb3, 0x18, 0xdd, 0x85, 0x82, 0x41, 0xa9, 0xef, 0x91, 0xea, 0x86, + 0x63, 0x9b, 0xbe, 0xc7, 0xf9, 0x7f, 0xb8, 0x5b, 0x37, 0x3c, 0x42, 0x45, 0x35, 0x39, 0x7d, 0x4d, + 0x56, 0x53, 0x58, 0x1f, 0x62, 0x87, 0x87, 0x22, 0xa0, 0xfb, 0x30, 0xdf, 0xe8, 0xae, 0x5d, 0x96, + 0x79, 0x26, 0xad, 0xcc, 0x44, 0xc3, 0xf4, 0x63, 0x32, 0x83, 0x64, 0xd3, 0x71, 0x12, 0x5a, 0x3d, + 0x80, 0x63, 0xdb, 0xfc, 0x0e, 0x53, 0xc7, 0xf7, 0x4c, 0x12, 0x13, 0x10, 0x95, 0x20, 0xd7, 0x22, + 0x5e, 0x25, 0x20, 0x51, 0x5e, 0xcf, 0x73, 0xfa, 0x7d, 0xc6, 0x05, 0x38, 0x90, 0xa3, 0xf7, 0x60, + 0xd1, 0x8e, 0x3d, 0x6f, 0xe3, 0x4d, 0x5a, 0x98, 0x12, 0xa6, 0xcb, 0x9d, 0x76, 0x69, 0x71, 0x3b, + 0xa9, 0xc2, 0xbd, 0xb6, 0x6a, 0x3b, 0x03, 0x2b, 0x43, 0xf8, 0x8e, 0x6e, 0xc3, 0x0c, 0x95, 0xbf, + 0x25, 0x87, 0x4f, 0xa6, 0xd5, 0x2e, 0x7d, 0xe3, 0x69, 0x1b, 0x82, 0xe1, 0x08, 0x0a, 0x39, 0x30, + 0xef, 0xc9, 0x14, 0x44, 0x4c, 0x39, 0x75, 0xcf, 0xa7, 0x61, 0xf7, 0x77, 0x27, 0x6e, 0x2e, 0xee, + 0x06, 0xc4, 0x49, 0x7c, 0xf4, 0x10, 0x96, 0xba, 0xca, 0x0e, 0x62, 0x4e, 0x8a, 0x98, 0x17, 0xd3, + 0x62, 0x0e, 0x3c, 0x14, 0xbd, 0x20, 0xc3, 0x2e, 0x6d, 0xf7, 0xc0, 0xe2, 0xbe, 0x40, 0xea, 0x6f, + 0x19, 0x18, 0x31, 0x88, 0x5f, 0xc2, 0x52, 0x75, 0x2f, 0xb1, 0x54, 0xbd, 0xff, 0xfc, 0x2f, 0xcc, + 0xd0, 0x25, 0xab, 0xde, 0xb3, 0x64, 0x7d, 0xf8, 0x02, 0x31, 0x46, 0x2f, 0x5d, 0xff, 0x64, 0xe0, + 0x95, 0xe1, 0xce, 0xf1, 0x12, 0x76, 0x3d, 0x31, 0xd2, 0x2e, 0xf5, 0x8c, 0xb4, 0x93, 0x63, 0x40, + 0xfc, 0xbf, 0x94, 0xf5, 0x2c, 0x65, 0xbf, 0x2b, 0x50, 0x1c, 0xde, 0xb7, 0x97, 0xb0, 0xa4, 0x7d, + 0x95, 0x5c, 0xd2, 0xde, 0x79, 0x7e, 0x92, 0x0d, 0x59, 0xda, 0xae, 0x8e, 0xe2, 0x56, 0xb4, 0x5e, + 0x8d, 0xf1, 0xc4, 0xfe, 0x3a, 0xb2, 0x55, 0x62, 0x1b, 0x4c, 0xf9, 0x97, 0x90, 0xf0, 0xfe, 0xc8, + 0x36, 0x2a, 0x0d, 0xd2, 0x24, 0x36, 0x93, 0x84, 0xac, 0xc3, 0x74, 0x23, 0x78, 0x1b, 0xe5, 0xa5, + 0x5e, 0x1f, 0xeb, 0x49, 0x1a, 0xf5, 0x94, 0x06, 0xcf, 0xb0, 0x34, 0xc3, 0x21, 0xbc, 0xfa, 0x83, + 0x02, 0x6b, 0x69, 0x97, 0x15, 0x1d, 0x0c, 0x58, 0x76, 0x5e, 0x60, 0x91, 0x1d, 0x7f, 0xf9, 0xf9, + 0x59, 0x81, 0xa3, 0x83, 0x76, 0x0a, 0x4e, 0x7f, 0xbe, 0x48, 0x44, 0x5b, 0x40, 0x44, 0xff, 0x9b, + 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x86, 0x99, 0xba, 0x61, 0x57, 0x77, 0xad, 0xaf, 0xc3, 0xfd, 0x36, + 0x22, 0xe0, 0x27, 0x52, 0x8e, 0x23, 0x0b, 0x74, 0x05, 0x96, 0x84, 0xdf, 0x26, 0xb1, 0x6b, 0xac, + 0x2e, 0x7a, 0x25, 0xae, 0x72, 0x2e, 0x7e, 0x0f, 0x6e, 0xf6, 0xe8, 0x71, 0x9f, 0x87, 0xfa, 0xaf, + 0x02, 0xe8, 0x79, 0xde, 0xf9, 0x53, 0x90, 0x37, 0x5c, 0x4b, 0x2c, 0x7b, 0xc1, 0x15, 0xc8, 0xeb, + 0xf3, 0x9d, 0x76, 0x29, 0xbf, 0xbe, 0x73, 0x2d, 0x10, 0xe2, 0x58, 0xcf, 0x8d, 0xc3, 0x27, 0x30, + 0x78, 0xea, 0xa4, 0x71, 0x18, 0x98, 0xe2, 0x58, 0x8f, 0x2e, 0xc3, 0x9c, 0xd9, 0xf0, 0x29, 0x23, + 0xde, 
0xae, 0xe9, 0xb8, 0x44, 0x8c, 0x8c, 0x19, 0xfd, 0xa8, 0xac, 0x69, 0x6e, 0xa3, 0x4b, 0x87, + 0x13, 0x96, 0x48, 0x03, 0xe0, 0x84, 0xa7, 0xae, 0xc1, 0xe3, 0xe4, 0x44, 0x9c, 0x05, 0x7e, 0x60, + 0xdb, 0x91, 0x14, 0x77, 0x59, 0xa8, 0xf7, 0xe1, 0xd8, 0x2e, 0xf1, 0x5a, 0x96, 0x49, 0xd6, 0x4d, + 0xd3, 0xf1, 0x6d, 0x16, 0xae, 0xad, 0x65, 0xc8, 0x47, 0x66, 0xf2, 0x4e, 0x1c, 0x91, 0xf1, 0xf3, + 0x11, 0x16, 0x8e, 0x6d, 0xa2, 0x4b, 0x98, 0x19, 0x7e, 0x09, 0x33, 0x30, 0x1d, 0xc3, 0x67, 0xf7, + 0x2d, 0xbb, 0x2a, 0x91, 0x8f, 0x87, 0xd6, 0xd7, 0x2d, 0xbb, 0xfa, 0xac, 0x5d, 0x9a, 0x95, 0x66, + 0xfc, 0x13, 0x0b, 0x43, 0x74, 0x0d, 0xb2, 0x3e, 0x25, 0x9e, 0xbc, 0x5e, 0xa7, 0xd2, 0xc8, 0x7c, + 0x9b, 0x12, 0x2f, 0xdc, 0x7c, 0x66, 0x38, 0x32, 0x17, 0x60, 0x01, 0x81, 0xb6, 0x20, 0x57, 0xe3, + 0x87, 0x22, 0xa7, 0xfe, 0xe9, 0x34, 0xac, 0xee, 0x75, 0x3e, 0xa0, 0x81, 0x90, 0xe0, 0x00, 0x05, + 0x3d, 0x80, 0x05, 0x9a, 0x68, 0xa1, 0x38, 0xae, 0x31, 0x36, 0x99, 0x81, 0x8d, 0xd7, 0x51, 0xa7, + 0x5d, 0x5a, 0x48, 0xaa, 0x70, 0x4f, 0x00, 0xb5, 0x0c, 0xb3, 0x5d, 0x05, 0xa6, 0xcf, 0x3f, 0xfd, + 0xcc, 0xa3, 0xa7, 0xc5, 0x89, 0xc7, 0x4f, 0x8b, 0x13, 0x4f, 0x9e, 0x16, 0x27, 0xbe, 0xe9, 0x14, + 0x95, 0x47, 0x9d, 0xa2, 0xf2, 0xb8, 0x53, 0x54, 0x9e, 0x74, 0x8a, 0xca, 0x1f, 0x9d, 0xa2, 0xf2, + 0xdd, 0x9f, 0xc5, 0x89, 0x3b, 0xd3, 0x32, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3c, 0xd4, + 0x36, 0xaf, 0xfa, 0x13, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if 
len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= 
len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", 
this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", 
"Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated 
+ } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + 
if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := 
m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { 
+ return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto new file mode 100644 index 000000000..9ddfc5465 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -0,0 +1,434 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. 
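// Illustrative aside, not taken from the vendored files: every generated
// Unmarshal function and skipGenerated in generated.pb.go above decodes
// protobuf base-128 varints with the same loop -- b&0x7F carries seven
// payload bits per byte and a clear high bit (b < 0x80) marks the final
// byte. A minimal standalone sketch of that loop, with assumed names:
package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow      = errors.New("varint overflows a 64-bit integer")
	errUnexpectedEOF = errors.New("unexpected end of buffer")
)

// decodeVarint reads one varint from data and returns the decoded value and
// the number of bytes consumed, mirroring the loops in generated.pb.go.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	i := 0
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if i >= len(data) {
			return 0, 0, errUnexpectedEOF
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, i, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded as two bytes
	fmt.Println(v, n, err)                        // 300 2 <nil>
}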
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. 
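// Illustrative aside, assuming the Go FlowSchema types added by types.go
// later in this patch: the matchingPrecedence comment above says the chosen
// FlowSchema is among those with the numerically lowest value in [1,10000],
// with 1000 as the default when unspecified. A hedged sketch of that
// selection rule (the authoritative logic lives in the apiserver, not here):
func effectiveMatchingPrecedence(fs *FlowSchema) int32 {
	if fs.Spec.MatchingPrecedence == 0 {
		return 1000 // documented default when the precedence is not specified
	}
	return fs.Spec.MatchingPrecedence
}

func pickByPrecedence(matching []*FlowSchema) *FlowSchema {
	var best *FlowSchema
	for _, fs := range matching {
		if best == nil || effectiveMatchingPrecedence(fs) < effectiveMatchingPrecedence(best) {
			best = fs
		}
	}
	return best // nil when no FlowSchema matched
}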
+ // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. 
The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. 
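// Illustrative aside on the PolicyRulesWithSubjects message above: it matches
// a request if and only if (a) some subject matches and (b) some resource or
// non-resource rule matches. The sketch below restates that rule with
// hypothetical helpers (RequestAttributes, matchesSubject,
// matchesResourcePolicyRule, matchesNonResourcePolicyRule) and the Go types
// from types.go in this patch; the real evaluation happens in the apiserver.
func policyRuleMatches(rule PolicyRulesWithSubjects, req RequestAttributes) bool {
	subjectMatched := false
	for _, s := range rule.Subjects {
		if matchesSubject(s, req) {
			subjectMatched = true
			break
		}
	}
	if !subjectMatched {
		return false // (a) failed: no subject matched
	}
	for _, rr := range rule.ResourceRules {
		if matchesResourcePolicyRule(rr, req) {
			return true // (b) satisfied by a resource rule
		}
	}
	for _, nrr := range rule.NonResourceRules {
		if matchesNonResourcePolicyRule(nrr, req) {
			return true // (b) satisfied by a non-resource rule
		}
	}
	return false
}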
+message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. 
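// Illustrative, self-contained sketch of the queuing behaviour described in
// QueuingConfiguration above; the apiserver's real shuffle-sharding dealer
// differs in detail. The flow identifier (FlowSchema name plus distinguisher)
// is hashed, the hash deterministically deals a "hand" of handSize distinct
// queues out of queues, and the request joins the shortest queue in the hand.
package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// dealHand picks handSize distinct queue indices for a flow, the same hand
// every time the same flow identifier is seen.
func dealHand(flowID string, queues, handSize int) []int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	r := rand.New(rand.NewSource(int64(h.Sum64())))
	return r.Perm(queues)[:handSize]
}

// pickQueue returns the index of the shortest queue in the flow's hand.
func pickQueue(flowID string, queueLengths []int, handSize int) int {
	hand := dealHand(flowID, len(queueLengths), handSize)
	best := hand[0]
	for _, q := range hand[1:] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}

func main() {
	lengths := make([]int, 64)     // queues = 64, the documented default
	lengths[3], lengths[17] = 5, 2 // pretend some queues already hold requests
	fmt.Println(pickQueue("example-schema/alice", lengths, 8)) // handSize = 8
}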
+ // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // Required + // +unionDiscriminator + optional string kind = 1; + + // +optional + optional UserSubject user = 2; + + // +optional + optional GroupSubject group = 3; + + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/register.go b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go new file mode 100644 index 000000000..e78549314 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go new file mode 100644 index 000000000..ece834e92 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -0,0 +1,529 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. 
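// Illustrative aside with arbitrary example values: the types in this file
// compose as sketched below. It assumes the metav1 import at the top of this
// file and the Subject/ResourcePolicyRule types defined further down in the
// same file; the names used here are examples, not prescribed configuration.
func exampleFlowSchema() FlowSchema {
	return FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-flows"},
		Spec: FlowSchemaSpec{
			PriorityLevelConfiguration: PriorityLevelConfigurationReference{Name: "example-priority-level"},
			MatchingPrecedence:         1000,
			DistinguisherMethod:        &FlowDistinguisherMethod{Type: FlowDistinguisherMethodByUserType},
			Rules: []PolicyRulesWithSubjects{{
				Subjects: []Subject{{
					Kind:  SubjectKindGroup,
					Group: &GroupSubject{Name: "system:authenticated"},
				}},
				ResourceRules: []ResourcePolicyRule{{
					Verbs:      []string{VerbAll},
					APIGroups:  []string{APIGroupAll},
					Resources:  []string{ResourceAll},
					Namespaces: []string{NamespaceEvery},
				}},
			}},
		},
	}
}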
+ // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. 
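// Illustrative sketch of the distinguisher semantics documented for
// FlowDistinguisherMethod above, with an assumed (username, namespace) input
// shape; the authoritative computation is in the apiserver's flowcontrol code.
func flowDistinguisher(method *FlowDistinguisherMethod, username, namespace string) string {
	if method == nil {
		return "" // nil disables the distinguisher: always the empty string
	}
	switch method.Type {
	case FlowDistinguisherMethodByUserType:
		return username
	case FlowDistinguisherMethodByNamespaceType:
		// Empty for cluster-scoped objects and non-resource requests, per the
		// comment on FlowDistinguisherMethodByNamespaceType above.
		return namespace
	default:
		return ""
	}
}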
+// +union +type Subject struct { + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. 
+ // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfiguration represents the configuration of a priority level. 
+type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? 
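// Illustrative sketch of the union constraint documented on
// PriorityLevelConfigurationSpec above (limited must be set if and only if
// type is "Limited"); the real checks live in the apiserver's validation
// package, and fmt is an assumed import here.
func validatePriorityLevelConfigurationSpec(spec PriorityLevelConfigurationSpec) error {
	switch spec.Type {
	case PriorityLevelEnablementLimited:
		if spec.Limited == nil {
			return fmt.Errorf("limited must be set when type is %q", spec.Type)
		}
	case PriorityLevelEnablementExempt:
		if spec.Limited != nil {
			return fmt.Errorf("limited must not be set when type is %q", spec.Type)
		}
	default:
		return fmt.Errorf("unrecognized priority level enablement %q", spec.Type)
	}
	return nil
}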
+type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. 
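// Illustrative aside working through the assured-concurrency formula quoted
// above, ACV(l) = ceil( SCL * ACS(l) / sum over levels k of ACS(k) ), with
// plain integers; the apiserver's real computation differs in detail.
func assuredConcurrencyValues(serverConcurrencyLimit int, shares map[string]int32) map[string]int {
	var total int64
	for _, acs := range shares {
		total += int64(acs)
	}
	acv := make(map[string]int, len(shares))
	if total <= 0 {
		return acv
	}
	for name, acs := range shares {
		// Integer ceiling of SCL*ACS/total.
		acv[name] = int((int64(serverConcurrencyLimit)*int64(acs) + total - 1) / total)
	}
	return acv
}

// For example, assuredConcurrencyValues(600, map[string]int32{"a": 30, "b": 30, "c": 100})
// yields a=113, b=113, c=375: each level's ceiling is taken independently.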
+ // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..8343a883f --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. 
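The `distinguisherMethod` behavior described above (ByUser, ByNamespace, or nil for a permanently empty distinguisher) reduces to a small lookup. A minimal sketch, with the request attributes passed as plain strings rather than the apiserver's internal request-info types:

package main

import "fmt"

// flowDistinguisher derives the flow distinguisher string as documented:
// a nil (here: empty) method disables it, "ByUser" uses the username,
// "ByNamespace" uses the target namespace.
func flowDistinguisher(method, username, namespace string) string {
	switch method {
	case "ByUser":
		return username
	case "ByNamespace":
		return namespace
	default: // method disabled: the distinguisher is always the empty string
		return ""
	}
}

func main() {
	fmt.Printf("%q\n", flowDistinguisher("ByUser", "alice", "demo"))      // "alice"
	fmt.Printf("%q\n", flowDistinguisher("ByNamespace", "alice", "demo")) // "demo"
	fmt.Printf("%q\n", flowDistinguisher("", "alice", "demo"))            // ""
}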
The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. 
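Putting the `type`/`limited` contract documented above together, a Limited priority level with queuing can be built from the vendored `flowcontrol/v1beta1` types roughly as follows. The object name and the numbers are illustrative (64 queues, a hand size of 8, and 50 waiting requests per queue are the documented defaults), and the `PriorityLevelEnablementLimited` constant is assumed to be defined alongside the other constants vendored in this patch.

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A "Limited" priority level: requests beyond the assured concurrency
	// are queued rather than rejected.
	plc := flowcontrolv1beta1.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-workload"},
		Spec: flowcontrolv1beta1.PriorityLevelConfigurationSpec{
			Type: flowcontrolv1beta1.PriorityLevelEnablementLimited,
			Limited: &flowcontrolv1beta1.LimitedPriorityLevelConfiguration{
				AssuredConcurrencyShares: 30,
				LimitResponse: flowcontrolv1beta1.LimitResponse{
					Type: flowcontrolv1beta1.LimitResponseTypeQueue,
					Queuing: &flowcontrolv1beta1.QueuingConfiguration{
						Queues:           64,
						HandSize:         8,
						QueueLengthLimit: 50,
					},
				},
			},
		},
	}
	fmt.Println(plc.Name, plc.Spec.Type)
}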
Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. 
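Similarly, the FlowSchema fields documented above (priority level reference, matching precedence, distinguisher method, and subject/resource rules) combine into an object along these lines. Names and values are illustrative; the `FlowDistinguisherMethodByUserType` and `SubjectKindServiceAccount` constants are assumed to come from the same vendored package.

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fs := flowcontrolv1beta1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-flows"},
		Spec: flowcontrolv1beta1.FlowSchemaSpec{
			// Requests matched by this schema are handled by the priority
			// level constructed in the previous sketch.
			PriorityLevelConfiguration: flowcontrolv1beta1.PriorityLevelConfigurationReference{
				Name: "example-workload",
			},
			MatchingPrecedence: 1000, // the documented default
			DistinguisherMethod: &flowcontrolv1beta1.FlowDistinguisherMethod{
				Type: flowcontrolv1beta1.FlowDistinguisherMethodByUserType,
			},
			Rules: []flowcontrolv1beta1.PolicyRulesWithSubjects{{
				Subjects: []flowcontrolv1beta1.Subject{{
					Kind: flowcontrolv1beta1.SubjectKindServiceAccount,
					ServiceAccount: &flowcontrolv1beta1.ServiceAccountSubject{
						Namespace: "demo",
						Name:      "*", // any service account in "demo"
					},
				}},
				ResourceRules: []flowcontrolv1beta1.ResourcePolicyRule{{
					Verbs:      []string{"get", "list", "watch"},
					APIGroups:  []string{""},
					Resources:  []string{"configmaps"},
					Namespaces: []string{"*"},
				}},
			}},
		},
	}
	fmt.Println(fs.Name, fs.Spec.PriorityLevelConfiguration.Name)
}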
Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "Required", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..c8f6e2306 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,541 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. 
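These generated `DeepCopyInto`/`DeepCopy` methods follow the usual deepcopy-gen pattern: scalars are copied by assignment, while slices, maps, and pointers get fresh allocations. The practical consequence is that a copy can be mutated without touching the original, for example:

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)

func main() {
	orig := &flowcontrolv1beta1.LimitResponse{
		Type:    flowcontrolv1beta1.LimitResponseTypeQueue,
		Queuing: &flowcontrolv1beta1.QueuingConfiguration{Queues: 64, HandSize: 8, QueueLengthLimit: 50},
	}

	// DeepCopy allocates a fresh QueuingConfiguration, so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Queuing.Queues = 128

	fmt.Println(orig.Queuing.Queues, cp.Queuing.Queues) // 64 128
}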
+func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. 
+func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..d0f229718 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,93 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
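The prerelease-lifecycle methods in this file record that the v1beta1 flowcontrol API is introduced in 1.20, deprecated in 1.23, and no longer served in 1.26. They can be queried directly:

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)

func main() {
	var fs flowcontrolv1beta1.FlowSchema
	imaj, imin := fs.APILifecycleIntroduced()
	dmaj, dmin := fs.APILifecycleDeprecated()
	rmaj, rmin := fs.APILifecycleRemoved()
	fmt.Printf("introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		imaj, imin, dmaj, dmin, rmaj, rmin) // introduced 1.20, deprecated 1.23, removed 1.26
}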
+func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index a98ef94c8..74737098b 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1; diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index a97c318db..251bbafec 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1/doc.go similarity index 82% rename from vendor/k8s.io/api/settings/v1alpha1/doc.go rename to vendor/k8s.io/api/node/v1/doc.go index 60066bb6d..12cbcb8a0 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +groupName=settings.k8s.io +// +groupName=node.k8s.io -package v1alpha1 // import "k8s.io/api/settings/v1alpha1" +package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.pb.go b/vendor/k8s.io/api/node/v1/generated.pb.go new file mode 100644 index 000000000..775ade381 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.pb.go @@ -0,0 +1,1411 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1.Overhead") + 
proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1.Overhead.PodFixedEntry") + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto", fileDescriptor_6ac9be560e26ae98) +} + +var fileDescriptor_6ac9be560e26ae98 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xce, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x94, 0xa3, 0x43, 0x14, 0x21, 0x27, 0xca, 0x14, 0x90, + 0x7a, 0x6e, 0x2b, 0x84, 0x2a, 0x18, 0x90, 0x0c, 0xad, 0x40, 0x82, 0x02, 0x2e, 0x2c, 0x88, 0x81, + 0x8b, 0xfd, 0x70, 0xdc, 0xc4, 0xbe, 0xe8, 0x7c, 0x8e, 0xc8, 0x86, 0x58, 0x90, 0x98, 0xfa, 0x5f, + 0x18, 0xf8, 0x0b, 0x15, 0x53, 0xc7, 0x4e, 0x2d, 0x0d, 0xff, 0x82, 0x09, 0xdd, 0xd9, 0x4e, 0x5c, + 0x12, 0x42, 0xd9, 0xee, 0xde, 0x7d, 0xdf, 0xf7, 0xde, 0xfb, 0xde, 0x3d, 0x7c, 0xbf, 0xbb, 0x1d, + 0x51, 0x9f, 0x9b, 0xdd, 0xb8, 0x0d, 0x22, 0x04, 0x09, 0x91, 0x39, 0x80, 0xd0, 0xe5, 0xc2, 0x4c, + 0x1f, 0x58, 0xdf, 0x37, 0x43, 0xee, 0x82, 0x39, 0xd8, 0x34, 0x3d, 0x08, 0x41, 0x30, 0x09, 0x2e, + 0xed, 0x0b, 0x2e, 0x39, 0x21, 0x09, 0x86, 0xb2, 0xbe, 0x4f, 0x15, 0x86, 0x0e, 0x36, 0x6b, 0xeb, + 0x9e, 0x2f, 0x3b, 0x71, 0x9b, 0x3a, 0x3c, 0x30, 0x3d, 0xee, 0x71, 0x53, 0x43, 0xdb, 0xf1, 0x7b, + 0x7d, 0xd3, 0x17, 0x7d, 0x4a, 0x24, 0x6a, 0xcd, 0x5c, 0x1a, 0x87, 0x8b, 0x59, 0x69, 0x6a, 0x77, + 0x26, 0x98, 0x80, 0x39, 0x1d, 0x3f, 0x04, 0x31, 0x34, 0xfb, 0x5d, 0x4f, 0x93, 0x04, 0x44, 0x3c, + 0x16, 0x0e, 0xfc, 0x17, 0x2b, 0x32, 0x03, 0x90, 0x6c, 0x56, 0x2e, 0xf3, 0x6f, 0x2c, 0x11, 0x87, + 0xd2, 0x0f, 0xa6, 0xd3, 0xdc, 0xfd, 0x17, 0x21, 0x72, 0x3a, 0x10, 0xb0, 0x3f, 0x79, 0xcd, 0xef, + 0x45, 0x5c, 0x7a, 0x3e, 0x00, 0xd1, 0x01, 0xe6, 0x92, 0x63, 0x84, 0x4b, 0x7d, 0xee, 0xee, 0xfa, + 0x1f, 0xc0, 0xad, 0xa2, 0xc6, 0x42, 0xab, 0xbc, 0x75, 0x9b, 0x4e, 0x9b, 0x4b, 0x33, 0x02, 0x7d, + 0x91, 0x82, 0x77, 0x42, 0x29, 0x86, 0xd6, 0x67, 0x74, 0x74, 0x5a, 0x2f, 0x8c, 0x4e, 0xeb, 0xa5, + 0x2c, 0xfe, 0xeb, 0xb4, 0x5e, 0x9f, 0x76, 0x96, 0xda, 0xa9, 0x59, 0x4f, 0xfd, 0x48, 0x7e, 0x3a, + 0x9b, 0x0b, 0xd9, 0x63, 0x01, 0x7c, 0x39, 0xab, 0xaf, 0x5f, 0xc6, 0x7b, 0xfa, 0x32, 0x66, 0xa1, + 0xf4, 0xe5, 0xd0, 0x1e, 0x77, 0x51, 0xeb, 0xe2, 0x95, 0x0b, 0x45, 0x92, 0x55, 0xbc, 0xd0, 0x85, + 0x61, 0x15, 0x35, 0x50, 0x6b, 0xd9, 0x56, 0x47, 0xf2, 0x08, 0x2f, 0x0e, 0x58, 0x2f, 0x86, 0x6a, + 0xb1, 0x81, 0x5a, 0xe5, 0x2d, 0x9a, 0xeb, 0x78, 0x9c, 0x8b, 0xf6, 0xbb, 0x9e, 0xb6, 0x60, 0x3a, + 0x57, 0x42, 0xbe, 0x57, 0xdc, 0x46, 0xcd, 0xaf, 0x45, 0x5c, 0xb1, 0x13, 0xbf, 0x1f, 0xf6, 0x58, + 0x14, 0x91, 0x77, 0xb8, 0xa4, 0x26, 0xec, 0x32, 0xc9, 0x74, 0xc6, 0xf2, 0xd6, 0xc6, 0x3c, 0xf5, + 0x88, 0x2a, 0xb4, 0x76, 0xb8, 0x7d, 0x00, 0x8e, 0x7c, 0x06, 0x92, 0x59, 0x24, 0x35, 0x15, 0x4f, + 0x62, 0xf6, 0x58, 0x95, 0xdc, 0xc2, 0x4b, 0x1d, 0x16, 0xba, 0x3d, 0x10, 0xba, 0xfc, 0x65, 0xeb, + 0x5a, 0x0a, 0x5f, 0x7a, 0x9c, 0x84, 0xed, 0xec, 0x9d, 0xec, 0xe2, 0x12, 0x4f, 0x07, 0x57, 0x5d, + 0xd0, 0xc5, 0xdc, 0x9c, 0x37, 0x5c, 0xab, 0xa2, 0x26, 0x99, 0xdd, 0xec, 0x31, 0x97, 0xec, 0x61, + 0xac, 0x3e, 0x93, 0x1b, 0xf7, 0xfc, 0xd0, 0xab, 0x5e, 0xd1, 0x4a, 0xc6, 0x2c, 0xa5, 0xfd, 0x31, + 0xca, 0xba, 0xaa, 0x1a, 
0x98, 0xdc, 0xed, 0x9c, 0x42, 0xf3, 0x1b, 0xc2, 0xab, 0x79, 0xd7, 0xd4, + 0xaf, 0x20, 0x6f, 0xa7, 0x9c, 0xa3, 0x97, 0x73, 0x4e, 0xb1, 0xb5, 0x6f, 0xab, 0xd9, 0x67, 0xcc, + 0x22, 0x39, 0xd7, 0x76, 0xf0, 0xa2, 0x2f, 0x21, 0x88, 0xaa, 0x45, 0xfd, 0xc9, 0x1b, 0xb3, 0xaa, + 0xcf, 0x97, 0x64, 0xad, 0xa4, 0x62, 0x8b, 0x4f, 0x14, 0xcd, 0x4e, 0xd8, 0xcd, 0xc3, 0x22, 0xce, + 0x35, 0x45, 0x0e, 0x70, 0x45, 0x91, 0xf7, 0xa1, 0x07, 0x8e, 0xe4, 0x22, 0xdd, 0xa0, 0x8d, 0xf9, + 0xd6, 0xd0, 0xbd, 0x1c, 0x25, 0xd9, 0xa3, 0xb5, 0x34, 0x59, 0x25, 0xff, 0x64, 0x5f, 0xd0, 0x26, + 0xaf, 0x71, 0x59, 0xf2, 0x9e, 0x5a, 0x65, 0x9f, 0x87, 0x59, 0x1f, 0x17, 0xa6, 0xa0, 0x36, 0x49, + 0xa5, 0x7a, 0x35, 0x86, 0x59, 0x37, 0x52, 0xe1, 0xf2, 0x24, 0x16, 0xd9, 0x79, 0x9d, 0xda, 0x03, + 0x7c, 0x7d, 0xaa, 0x9e, 0x19, 0x2b, 0xb3, 0x96, 0x5f, 0x99, 0xe5, 0xdc, 0x0a, 0x58, 0xad, 0xa3, + 0x73, 0xa3, 0x70, 0x7c, 0x6e, 0x14, 0x4e, 0xce, 0x8d, 0xc2, 0xc7, 0x91, 0x81, 0x8e, 0x46, 0x06, + 0x3a, 0x1e, 0x19, 0xe8, 0x64, 0x64, 0xa0, 0x1f, 0x23, 0x03, 0x1d, 0xfe, 0x34, 0x0a, 0x6f, 0x8a, + 0x83, 0xcd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x40, 0xe0, 0x08, 0xf3, 0x05, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i-- + dAtA[i] = 0x12 + { + size, 
err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l 
= len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" 
+ for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + 
m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto new file mode 100644 index 000000000..4a86999f1 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -0,0 +1,109 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.node.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects.
+ repeated RuntimeClass items = 2; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map<string, string> nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1/register.go b/vendor/k8s.io/api/node/v1/register.go new file mode 100644 index 000000000..d514e9b5a --- /dev/null +++ b/vendor/k8s.io/api/node/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go new file mode 100644 index 000000000..b32cc36c4 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. 
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..c68c40e90 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/\nThis field is in beta starting v1.18 and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go similarity index 54% rename from vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go index ed6c31a32..35084da7e 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go @@ -18,34 +18,66 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPreset) DeepCopyInto(out *PodPreset) { +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. -func (in *PodPreset) DeepCopy() *PodPreset { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { if in == nil { return nil } - out := new(PodPreset) + out := new(RuntimeClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPreset) DeepCopyObject() runtime.Object { +func (in *RuntimeClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -53,13 +85,13 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) + *out = make([]RuntimeClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -67,18 +99,18 @@ func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. -func (in *PodPresetList) DeepCopy() *PodPresetList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { if in == nil { return nil } - out := new(PodPresetList) + out := new(RuntimeClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPresetList) DeepCopyObject() runtime.Object { +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -86,33 +118,18 @@ func (in *PodPresetList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { +func (in *Scheduling) DeepCopyInto(out *Scheduling) { *out = *in - in.Selector.DeepCopyInto(&out.Selector) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -120,12 +137,12 @@ func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. -func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { if in == nil { return nil } - out := new(PodPresetSpec) + out := new(Scheduling) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ac05f839e..310ad490b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1alpha1; @@ -78,8 +78,8 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. optional string runtimeHandler = 1; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index b59767107..03e7e6f33 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -56,8 +56,8 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. 
RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 390000172..d3011466b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -58,7 +58,7 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 49166798d..5c2d9d50a 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1beta1; @@ -57,8 +57,8 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. 
optional string handler = 2; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 1d2b96312..89559949c 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -48,8 +48,8 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 681f73f23..a486147f0 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 30db726f9..18a1c6578 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.policy.v1beta1; diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 71fa08341..22c6dae4b 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 60354e6a6..caed7ec30 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1alpha1; diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 44cd6c24a..17d3741f0 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1beta1; diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index e7489f539..1f9a7474a 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 8c4a2c460..da27a13e7 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1alpha1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index eae3c01f3..99bdaabee 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go deleted file mode 100644 index 7ed066d31..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1053 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{0} -} -func (m *PodPreset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPreset) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPreset.Merge(m, src) -} -func (m *PodPreset) XXX_Size() int { - return m.Size() -} -func (m *PodPreset) XXX_DiscardUnknown() { - xxx_messageInfo_PodPreset.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPreset proto.InternalMessageInfo - -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{1} -} -func (m *PodPresetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetList.Merge(m, src) -} -func (m *PodPresetList) XXX_Size() int { - return m.Size() -} -func (m *PodPresetList) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetList proto.InternalMessageInfo - -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{2} -} -func 
(m *PodPresetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetSpec.Merge(m, src) -} -func (m *PodPresetSpec) XXX_Size() int { - return m.Size() -} -func (m *PodPresetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") - proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) -} - -var fileDescriptor_48fab0a6ea4b79ce = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 
0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} - -func (m *PodPreset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if 
len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PodPreset) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodPresetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodPresetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PodPreset) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodPreset{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), 
`&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnvFrom += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeMounts += "}" - s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PodPreset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodPreset{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, v11.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, v11.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) - if err := 
m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto deleted file mode 100644 index db1ec9312..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.settings.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. 
-message PodPreset { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // +optional - optional PodPresetSpec spec = 2; -} - -// PodPresetList is a list of PodPreset objects. -message PodPresetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodPreset items = 2; -} - -// PodPresetSpec is a description of a pod preset. -message PodPresetSpec { - // Selector is a label query over a set of resources, in this case pods. - // Required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // Env defines the collection of EnvVar to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvVar env = 2; - - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; - - // Volumes defines the collection of Volume to inject into the pod. - // +optional - repeated k8s.io.api.core.v1.Volume volumes = 4; - - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; -} - diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go deleted file mode 100644 index 8cc99d440..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -type PodPreset struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // +optional - Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodPresetSpec is a description of a pod preset. -type PodPresetSpec struct { - // Selector is a label query over a set of resources, in this case pods. - // Required. - Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // Env defines the collection of EnvVar to inject into containers. - // +optional - Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` - // Volumes defines the collection of Volume to inject into the pod. 
- // +optional - Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPresetList is a list of PodPreset objects. -type PodPresetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 0501e0af3..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PodPreset = map[string]string{ - "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", -} - -func (PodPreset) SwaggerDoc() map[string]string { - return map_PodPreset -} - -var map_PodPresetList = map[string]string{ - "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (PodPresetList) SwaggerDoc() map[string]string { - return map_PodPresetList -} - -var map_PodPresetSpec = map[string]string{ - "": "PodPresetSpec is a description of a pod preset.", - "selector": "Selector is a label query over a set of resources, in this case pods. 
Required.", - "env": "Env defines the collection of EnvVar to inject into containers.", - "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", - "volumes": "Volumes defines the collection of Volume to inject into the pod.", - "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", -} - -func (PodPresetSpec) SwaggerDoc() map[string]string { - return map_PodPresetSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 2c7088c38..f6b97e013 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_3b530c1983504d8d, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_3b530c1983504d8d, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_3b530c1983504d8d, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_3b530c1983504d8d, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = 
VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return fileDescriptor_3b530c1983504d8d, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_3b530c1983504d8d, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_3b530c1983504d8d, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") @@ -520,95 +549,101 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1395 bytes of a gzipped FileDescriptorProto + // 1500 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x76, 0x7e, 0x8c, 0x93, 0xc6, 0x99, 0xe4, 0xfb, 0xfd, 0xfa, 0x9b, 0x83, 0x37, - 0x5a, 0x2a, 0x08, 0x85, 0xae, 0x9b, 0x52, 0xaa, 0xaa, 0x52, 0x91, 0xe2, 0xc4, 0xa5, 0x11, 0x71, - 0x12, 0x8d, 0x4b, 0x85, 0x10, 0x20, 0x26, 0xbb, 0x13, 0x67, 0x1b, 0xef, 0xce, 0x76, 0x77, 0x6c, - 0xf0, 0x8d, 0x13, 0x37, 0x24, 0xb8, 0xf2, 0x57, 0x80, 0x04, 0x17, 0x8e, 0x9c, 0xca, 0xad, 0xe2, - 0xd4, 0xd3, 0x8a, 0x2e, 0x67, 0xb8, 0x71, 0xc9, 0x09, 0xcd, 0xec, 0xd8, 0xfb, 0xc3, 0xeb, 0x34, - 0xbd, 0xe4, 0xe6, 0x79, 0xef, 0x7d, 0x3e, 0xef, 0xbd, 0x79, 0x3f, 0x66, 0x0d, 0xde, 0x3b, 0xbd, - 0xe3, 0xeb, 0x16, 0xad, 0x9f, 0xf6, 0x8e, 0x88, 0xe7, 0x10, 0x46, 0xfc, 0x7a, 0x9f, 0x38, 0x26, - 0xf5, 0xea, 0x52, 0x81, 0x5d, 0xab, 0xee, 0x33, 0xea, 0xe1, 0x0e, 0xa9, 0xf7, 0x37, 0xeb, 0x1d, - 0xe2, 0x10, 0x0f, 0x33, 0x62, 0xea, 0xae, 0x47, 0x19, 0x85, 0xff, 0x89, 0xcc, 0x74, 0xec, 0x5a, - 0xba, 0x34, 0xd3, 0xfb, 0x9b, 0x6b, 0xd7, 0x3b, 0x16, 0x3b, 0xe9, 0x1d, 0xe9, 0x06, 0xb5, 0xeb, - 0x1d, 0xda, 0xa1, 0x75, 0x61, 0x7d, 0xd4, 0x3b, 0x16, 0x27, 0x71, 0x10, 0xbf, 0x22, 0x96, 0x35, - 0x2d, 0xe1, 0xcc, 0xa0, 0x5e, 0x9e, 0xa7, 0xb5, 0x5b, 0xb1, 0x8d, 0x8d, 0x8d, 0x13, 0xcb, 0x21, - 0xde, 0xa0, 0xee, 0x9e, 0x76, 0xb8, 0xc0, 0xaf, 0xdb, 0x84, 0xe1, 0x3c, 0x54, 0x7d, 0x12, 0xca, - 0xeb, 0x39, 0xcc, 0xb2, 0xc9, 0x18, 0xe0, 0xf6, 0xcb, 0x00, 0xbe, 0x71, 0x42, 0x6c, 0x9c, 0xc5, - 0x69, 0x3f, 0x2b, 0x60, 0x7e, 0xbb, 0xbd, 0xbb, 
0xe3, 0x59, 0x7d, 0xe2, 0xc1, 0xcf, 0xc1, 0x1c, - 0x8f, 0xc8, 0xc4, 0x0c, 0x57, 0x95, 0x75, 0x65, 0xa3, 0x7c, 0xf3, 0x86, 0x1e, 0xdf, 0xd4, 0x88, - 0x58, 0x77, 0x4f, 0x3b, 0x5c, 0xe0, 0xeb, 0xdc, 0x5a, 0xef, 0x6f, 0xea, 0x07, 0x47, 0x8f, 0x89, - 0xc1, 0x5a, 0x84, 0xe1, 0x06, 0x7c, 0x1a, 0xa8, 0x53, 0x61, 0xa0, 0x82, 0x58, 0x86, 0x46, 0xac, - 0xf0, 0x3e, 0x28, 0xfa, 0x2e, 0x31, 0xaa, 0xd3, 0x82, 0xfd, 0xaa, 0x9e, 0x5b, 0x07, 0x7d, 0x14, - 0x51, 0xdb, 0x25, 0x46, 0x63, 0x41, 0x32, 0x16, 0xf9, 0x09, 0x09, 0xbc, 0xf6, 0x93, 0x02, 0x16, - 0x47, 0x56, 0x7b, 0x96, 0xcf, 0xe0, 0x27, 0x63, 0xb1, 0xeb, 0x17, 0x8b, 0x9d, 0xa3, 0x45, 0xe4, - 0x15, 0xe9, 0x67, 0x6e, 0x28, 0x49, 0xc4, 0xdd, 0x04, 0x25, 0x8b, 0x11, 0xdb, 0xaf, 0x4e, 0xaf, - 0x17, 0x36, 0xca, 0x37, 0xd7, 0x5f, 0x16, 0x78, 0x63, 0x51, 0x92, 0x95, 0x76, 0x39, 0x0c, 0x45, - 0x68, 0xed, 0x9f, 0xe9, 0x44, 0xd8, 0x3c, 0x1d, 0x78, 0x17, 0x5c, 0xc1, 0x8c, 0x61, 0xe3, 0x04, - 0x91, 0x27, 0x3d, 0xcb, 0x23, 0xa6, 0x08, 0x7e, 0xae, 0x01, 0xc3, 0x40, 0xbd, 0xb2, 0x95, 0xd2, - 0xa0, 0x8c, 0x25, 0xc7, 0xba, 0xd4, 0xdc, 0x75, 0x8e, 0xe9, 0x81, 0xd3, 0xa2, 0x3d, 0x87, 0x89, - 0x6b, 0x95, 0xd8, 0xc3, 0x94, 0x06, 0x65, 0x2c, 0xa1, 0x01, 0x56, 0xfb, 0xb4, 0xdb, 0xb3, 0xc9, - 0x9e, 0x75, 0x4c, 0x8c, 0x81, 0xd1, 0x25, 0x2d, 0x6a, 0x12, 0xbf, 0x5a, 0x58, 0x2f, 0x6c, 0xcc, - 0x37, 0xea, 0x61, 0xa0, 0xae, 0x3e, 0xca, 0xd1, 0x9f, 0x05, 0xea, 0x4a, 0x8e, 0x1c, 0xe5, 0x92, - 0xc1, 0x7b, 0x60, 0x49, 0x5e, 0xce, 0x36, 0x76, 0xb1, 0x61, 0xb1, 0x41, 0xb5, 0x28, 0x22, 0x5c, - 0x09, 0x03, 0x75, 0xa9, 0x9d, 0x56, 0xa1, 0xac, 0x2d, 0x7c, 0x00, 0x16, 0x8f, 0xfd, 0xf7, 0x3d, - 0xda, 0x73, 0x0f, 0x69, 0xd7, 0x32, 0x06, 0xd5, 0xd2, 0xba, 0xb2, 0x31, 0xdf, 0xd0, 0xc2, 0x40, - 0x5d, 0xbc, 0xdf, 0x4e, 0x28, 0xce, 0xb2, 0x02, 0x94, 0x06, 0x6a, 0x3f, 0x2a, 0x60, 0x76, 0xbb, - 0xbd, 0xbb, 0x4f, 0x4d, 0x72, 0x09, 0x4d, 0xbe, 0x93, 0x6a, 0x72, 0x6d, 0x72, 0xaf, 0xf0, 0x78, - 0x26, 0xb6, 0xf8, 0xdf, 0x51, 0x8b, 0x73, 0x1b, 0x39, 0x9e, 0xeb, 0xa0, 0xe8, 0x60, 0x9b, 0x88, - 0xa8, 0xe7, 0x63, 0xcc, 0x3e, 0xb6, 0x09, 0x12, 0x1a, 0xf8, 0x3a, 0x98, 0x71, 0xa8, 0x49, 0x76, - 0x77, 0x84, 0xef, 0xf9, 0xc6, 0x15, 0x69, 0x33, 0xb3, 0x2f, 0xa4, 0x48, 0x6a, 0xe1, 0x2d, 0xb0, - 0xc0, 0xa8, 0x4b, 0xbb, 0xb4, 0x33, 0xf8, 0x80, 0x0c, 0x86, 0x55, 0xaf, 0x84, 0x81, 0xba, 0xf0, - 0x30, 0x21, 0x47, 0x29, 0x2b, 0xf8, 0x29, 0x28, 0xe3, 0x6e, 0x97, 0x1a, 0x98, 0xe1, 0xa3, 0x2e, - 0x11, 0xa5, 0x2c, 0xdf, 0xbc, 0x36, 0x21, 0xbd, 0xa8, 0x4b, 0xb8, 0x5f, 0x44, 0x7c, 0xda, 0xf3, - 0x0c, 0xe2, 0x37, 0x96, 0xc2, 0x40, 0x2d, 0x6f, 0xc5, 0x14, 0x28, 0xc9, 0xa7, 0xfd, 0xa0, 0x80, - 0xb2, 0x4c, 0xf8, 0x12, 0x26, 0x7a, 0x3b, 0x3d, 0xd1, 0xb5, 0xf3, 0xab, 0x34, 0x61, 0x9e, 0x3f, - 0x1b, 0x45, 0x2c, 0x86, 0xf9, 0x00, 0xcc, 0x9a, 0xa2, 0x54, 0x7e, 0x55, 0x11, 0xac, 0x57, 0xcf, - 0x67, 0x95, 0xbb, 0x62, 0x49, 0x72, 0xcf, 0x46, 0x67, 0x1f, 0x0d, 0x59, 0xb4, 0x6f, 0x66, 0xc0, - 0xc2, 0x70, 0x4c, 0xba, 0xd8, 0xf7, 0x2f, 0xa1, 0x79, 0xdf, 0x05, 0x65, 0xd7, 0xa3, 0x7d, 0xcb, - 0xb7, 0xa8, 0x43, 0x3c, 0xd9, 0x47, 0x2b, 0x12, 0x52, 0x3e, 0x8c, 0x55, 0x28, 0x69, 0x07, 0x3b, - 0x00, 0xb8, 0xd8, 0xc3, 0x36, 0x61, 0x3c, 0xfb, 0x82, 0xc8, 0xfe, 0x9d, 0x09, 0xd9, 0x27, 0x33, - 0xd2, 0x0f, 0x47, 0xa8, 0xa6, 0xc3, 0xbc, 0x41, 0x1c, 0x5d, 0xac, 0x40, 0x09, 0x6a, 0x78, 0x0a, - 0x16, 0x3d, 0x62, 0x74, 0xb1, 0x65, 0xcb, 0xa5, 0x50, 0x14, 0x11, 0x36, 0xf9, 0x52, 0x40, 0x49, - 0xc5, 0x59, 0xa0, 0xde, 0x18, 0x7f, 0xa0, 0xf5, 0x43, 0xe2, 0xf9, 0x96, 0xcf, 0x88, 0xc3, 0xa2, - 0x0e, 0x4d, 0x61, 0x50, 0x9a, 0x9b, 0xcf, 0x89, 0xcd, 0xd7, 0xe5, 0x81, 
0xcb, 0x2c, 0xea, 0xf8, - 0xd5, 0x52, 0x3c, 0x27, 0xad, 0x84, 0x1c, 0xa5, 0xac, 0xe0, 0x1e, 0x58, 0xe5, 0x7d, 0xfd, 0x45, - 0xe4, 0xa0, 0xf9, 0xa5, 0x8b, 0x1d, 0x7e, 0x4b, 0xd5, 0x19, 0xb1, 0xfb, 0xaa, 0x7c, 0xb7, 0x6e, - 0xe5, 0xe8, 0x51, 0x2e, 0x0a, 0x7e, 0x04, 0x96, 0xa3, 0xe5, 0xda, 0xb0, 0x1c, 0xd3, 0x72, 0x3a, - 0x7c, 0xb5, 0x56, 0x67, 0x45, 0xd2, 0xd7, 0xc2, 0x40, 0x5d, 0x7e, 0x94, 0x55, 0x9e, 0xe5, 0x09, - 0xd1, 0x38, 0x09, 0x7c, 0x02, 0x96, 0x85, 0x47, 0x62, 0xca, 0xa1, 0xb7, 0x88, 0x5f, 0x9d, 0x13, - 0xa5, 0xdb, 0x48, 0x96, 0x8e, 0x5f, 0x1d, 0xaf, 0xdb, 0x70, 0x35, 0xb4, 0x49, 0x97, 0x18, 0x8c, - 0x7a, 0x0f, 0x89, 0x67, 0x37, 0xfe, 0x2f, 0xeb, 0xb5, 0xbc, 0x95, 0xa5, 0x42, 0xe3, 0xec, 0x6b, - 0xf7, 0xc0, 0x52, 0xa6, 0xe0, 0xb0, 0x02, 0x0a, 0xa7, 0x64, 0x10, 0x2d, 0x35, 0xc4, 0x7f, 0xc2, - 0x55, 0x50, 0xea, 0xe3, 0x6e, 0x8f, 0x44, 0xcd, 0x87, 0xa2, 0xc3, 0xdd, 0xe9, 0x3b, 0x8a, 0xf6, - 0x8b, 0x02, 0x2a, 0xc9, 0xee, 0xb9, 0x84, 0x3d, 0xf1, 0x20, 0xbd, 0x27, 0x5e, 0xbb, 0x40, 0x4f, - 0x4f, 0x58, 0x16, 0xdf, 0x4f, 0x83, 0x4a, 0x54, 0x97, 0xe8, 0x5d, 0xb7, 0x89, 0xc3, 0x2e, 0x61, - 0xa0, 0x5b, 0xa9, 0xd7, 0xe8, 0xad, 0x73, 0xd7, 0x75, 0x1c, 0xd8, 0xa4, 0x67, 0x09, 0x7e, 0x08, - 0x66, 0x7c, 0x86, 0x59, 0x8f, 0x0f, 0x39, 0x27, 0xbc, 0x7e, 0x51, 0x42, 0x01, 0x8a, 0x5f, 0xa4, - 0xe8, 0x8c, 0x24, 0x99, 0xf6, 0xab, 0x02, 0x56, 0xb3, 0x90, 0x4b, 0xa8, 0xee, 0x5e, 0xba, 0xba, - 0x6f, 0x5c, 0x30, 0x99, 0x09, 0x15, 0xfe, 0x5d, 0x01, 0xff, 0x1d, 0xcb, 0x5b, 0xbc, 0x7d, 0x7c, - 0x27, 0xb8, 0x99, 0xcd, 0xb3, 0x1f, 0xbf, 0xe5, 0x62, 0x27, 0x1c, 0xe6, 0xe8, 0x51, 0x2e, 0x0a, - 0x3e, 0x06, 0x15, 0xcb, 0xe9, 0x5a, 0x0e, 0x89, 0x64, 0xed, 0xb8, 0xbe, 0xb9, 0x83, 0x9b, 0x65, - 0x16, 0xc5, 0x5d, 0x0d, 0x03, 0xb5, 0xb2, 0x9b, 0x61, 0x41, 0x63, 0xbc, 0xda, 0x6f, 0x39, 0x95, - 0x11, 0xaf, 0xdd, 0xdb, 0x60, 0x2e, 0xfa, 0x20, 0x25, 0x9e, 0x4c, 0x63, 0x74, 0xd3, 0x5b, 0x52, - 0x8e, 0x46, 0x16, 0xa2, 0x6f, 0xc4, 0x55, 0xc8, 0x40, 0x2f, 0xdc, 0x37, 0x02, 0x94, 0xe8, 0x1b, - 0x71, 0x46, 0x92, 0x8c, 0x07, 0xc1, 0xbf, 0x69, 0xc4, 0x5d, 0x16, 0xd2, 0x41, 0xec, 0x4b, 0x39, - 0x1a, 0x59, 0x68, 0x7f, 0x15, 0x72, 0x0a, 0x24, 0x1a, 0x30, 0x91, 0xcd, 0xf0, 0x13, 0x3c, 0x9b, - 0x8d, 0x39, 0xca, 0xc6, 0x84, 0xdf, 0x29, 0x00, 0xe2, 0x11, 0x45, 0x6b, 0xd8, 0xa0, 0x51, 0x17, - 0x35, 0x5f, 0x69, 0x24, 0xf4, 0xad, 0x31, 0x9e, 0xe8, 0x25, 0x5c, 0x93, 0xfe, 0xe1, 0xb8, 0x01, - 0xca, 0x71, 0x0e, 0x4d, 0x50, 0x8e, 0xa4, 0x4d, 0xcf, 0xa3, 0x9e, 0x1c, 0x4f, 0xed, 0xdc, 0x58, - 0x84, 0x65, 0xa3, 0x26, 0x3e, 0xcb, 0x62, 0xe8, 0x59, 0xa0, 0x96, 0x13, 0x7a, 0x94, 0xa4, 0xe5, - 0x5e, 0x4c, 0x12, 0x7b, 0x29, 0xbe, 0x9a, 0x97, 0x1d, 0x32, 0xd9, 0x4b, 0x82, 0x76, 0xad, 0x09, - 0xfe, 0x37, 0xe1, 0x5a, 0x5e, 0xe9, 0xbd, 0xf8, 0x5a, 0x01, 0x49, 0x1f, 0x70, 0x0f, 0x14, 0xf9, - 0xbf, 0x61, 0xb9, 0x48, 0xae, 0x5d, 0x6c, 0x91, 0x3c, 0xb4, 0x6c, 0x12, 0xaf, 0x42, 0x7e, 0x42, - 0x82, 0x05, 0xbe, 0x09, 0x66, 0x6d, 0xe2, 0xfb, 0xb8, 0x23, 0x3d, 0xc7, 0x1f, 0x72, 0xad, 0x48, - 0x8c, 0x86, 0x7a, 0xed, 0x36, 0x58, 0xc9, 0xf9, 0x20, 0x86, 0x2a, 0x28, 0x19, 0xe2, 0x8f, 0x1b, - 0x0f, 0xa8, 0xd4, 0x98, 0xe7, 0x1b, 0x65, 0x5b, 0xfc, 0x5f, 0x8b, 0xe4, 0x8d, 0x8d, 0xa7, 0x2f, - 0x6a, 0x53, 0xcf, 0x5e, 0xd4, 0xa6, 0x9e, 0xbf, 0xa8, 0x4d, 0x7d, 0x15, 0xd6, 0x94, 0xa7, 0x61, - 0x4d, 0x79, 0x16, 0xd6, 0x94, 0xe7, 0x61, 0x4d, 0xf9, 0x23, 0xac, 0x29, 0xdf, 0xfe, 0x59, 0x9b, - 0xfa, 0x78, 0xba, 0xbf, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x83, 0x24, 0x44, 0x13, - 0x11, 0x00, 0x00, + 0x17, 0xcf, 0xc6, 0xce, 0xaf, 0x71, 0xd2, 0x24, 0x93, 0xf4, 0xfb, 0x35, 0x39, 
0xd8, 0xd1, 0xb6, + 0x82, 0x50, 0xe8, 0xba, 0x29, 0xa5, 0xaa, 0x2a, 0x15, 0x29, 0x9b, 0xb8, 0x34, 0x22, 0xbf, 0x34, + 0x0e, 0x15, 0x42, 0x80, 0x3a, 0xd9, 0x9d, 0x38, 0x5b, 0x7b, 0x77, 0xb6, 0x3b, 0x63, 0x53, 0xdf, + 0xe0, 0xc2, 0x0d, 0x09, 0xae, 0x88, 0x3f, 0x02, 0x24, 0xb8, 0x70, 0xe4, 0x54, 0x6e, 0x15, 0xa7, + 0x9e, 0x2c, 0x6a, 0xce, 0xf0, 0x07, 0xe4, 0x84, 0x66, 0x76, 0xec, 0xfd, 0xe1, 0x75, 0x9a, 0x5e, + 0x72, 0xf3, 0xbe, 0x1f, 0x9f, 0xf7, 0x66, 0xde, 0x7b, 0x9f, 0x37, 0x06, 0x1f, 0x34, 0xee, 0x30, + 0xc3, 0xa1, 0x95, 0x46, 0xeb, 0x88, 0x04, 0x1e, 0xe1, 0x84, 0x55, 0xda, 0xc4, 0xb3, 0x69, 0x50, + 0x51, 0x0a, 0xec, 0x3b, 0x15, 0xc6, 0x69, 0x80, 0xeb, 0xa4, 0xd2, 0x5e, 0xaf, 0xd4, 0x89, 0x47, + 0x02, 0xcc, 0x89, 0x6d, 0xf8, 0x01, 0xe5, 0x14, 0x5e, 0x0e, 0xcd, 0x0c, 0xec, 0x3b, 0x86, 0x32, + 0x33, 0xda, 0xeb, 0x2b, 0xd7, 0xeb, 0x0e, 0x3f, 0x69, 0x1d, 0x19, 0x16, 0x75, 0x2b, 0x75, 0x5a, + 0xa7, 0x15, 0x69, 0x7d, 0xd4, 0x3a, 0x96, 0x5f, 0xf2, 0x43, 0xfe, 0x0a, 0x51, 0x56, 0xf4, 0x58, + 0x30, 0x8b, 0x06, 0x59, 0x91, 0x56, 0x6e, 0x45, 0x36, 0x2e, 0xb6, 0x4e, 0x1c, 0x8f, 0x04, 0x9d, + 0x8a, 0xdf, 0xa8, 0x0b, 0x01, 0xab, 0xb8, 0x84, 0xe3, 0x2c, 0xaf, 0xca, 0x28, 0xaf, 0xa0, 0xe5, + 0x71, 0xc7, 0x25, 0x43, 0x0e, 0xb7, 0x5f, 0xe5, 0xc0, 0xac, 0x13, 0xe2, 0xe2, 0xb4, 0x9f, 0xfe, + 0xab, 0x06, 0x66, 0x36, 0x6b, 0xdb, 0x5b, 0x81, 0xd3, 0x26, 0x01, 0x7c, 0x04, 0xa6, 0x45, 0x46, + 0x36, 0xe6, 0xb8, 0xa8, 0xad, 0x6a, 0x6b, 0x85, 0x9b, 0x37, 0x8c, 0xe8, 0xa6, 0x06, 0xc0, 0x86, + 0xdf, 0xa8, 0x0b, 0x01, 0x33, 0x84, 0xb5, 0xd1, 0x5e, 0x37, 0xf6, 0x8f, 0x1e, 0x13, 0x8b, 0xef, + 0x12, 0x8e, 0x4d, 0xf8, 0xac, 0x5b, 0x1e, 0xeb, 0x75, 0xcb, 0x20, 0x92, 0xa1, 0x01, 0x2a, 0xbc, + 0x0f, 0xf2, 0xcc, 0x27, 0x56, 0x71, 0x5c, 0xa2, 0x5f, 0x35, 0x32, 0xeb, 0x60, 0x0c, 0x32, 0xaa, + 0xf9, 0xc4, 0x32, 0x67, 0x15, 0x62, 0x5e, 0x7c, 0x21, 0xe9, 0xaf, 0xff, 0xa2, 0x81, 0xb9, 0x81, + 0xd5, 0x8e, 0xc3, 0x38, 0xfc, 0x6c, 0x28, 0x77, 0xe3, 0x7c, 0xb9, 0x0b, 0x6f, 0x99, 0xf9, 0x82, + 0x8a, 0x33, 0xdd, 0x97, 0xc4, 0xf2, 0xae, 0x82, 0x09, 0x87, 0x13, 0x97, 0x15, 0xc7, 0x57, 0x73, + 0x6b, 0x85, 0x9b, 0xab, 0xaf, 0x4a, 0xdc, 0x9c, 0x53, 0x60, 0x13, 0xdb, 0xc2, 0x0d, 0x85, 0xde, + 0xfa, 0x8f, 0xf9, 0x58, 0xda, 0xe2, 0x38, 0xf0, 0x2e, 0xb8, 0x84, 0x39, 0xc7, 0xd6, 0x09, 0x22, + 0x4f, 0x5a, 0x4e, 0x40, 0x6c, 0x99, 0xfc, 0xb4, 0x09, 0x7b, 0xdd, 0xf2, 0xa5, 0x8d, 0x84, 0x06, + 0xa5, 0x2c, 0x85, 0xaf, 0x4f, 0xed, 0x6d, 0xef, 0x98, 0xee, 0x7b, 0xbb, 0xb4, 0xe5, 0x71, 0x79, + 0xad, 0xca, 0xf7, 0x20, 0xa1, 0x41, 0x29, 0x4b, 0x68, 0x81, 0xe5, 0x36, 0x6d, 0xb6, 0x5c, 0xb2, + 0xe3, 0x1c, 0x13, 0xab, 0x63, 0x35, 0xc9, 0x2e, 0xb5, 0x09, 0x2b, 0xe6, 0x56, 0x73, 0x6b, 0x33, + 0x66, 0xa5, 0xd7, 0x2d, 0x2f, 0x3f, 0xcc, 0xd0, 0x9f, 0x76, 0xcb, 0x4b, 0x19, 0x72, 0x94, 0x09, + 0x06, 0xef, 0x81, 0x79, 0x75, 0x39, 0x9b, 0xd8, 0xc7, 0x96, 0xc3, 0x3b, 0xc5, 0xbc, 0xcc, 0x70, + 0xa9, 0xd7, 0x2d, 0xcf, 0xd7, 0x92, 0x2a, 0x94, 0xb6, 0x85, 0x0f, 0xc0, 0xdc, 0x31, 0xfb, 0x30, + 0xa0, 0x2d, 0xff, 0x80, 0x36, 0x1d, 0xab, 0x53, 0x9c, 0x58, 0xd5, 0xd6, 0x66, 0x4c, 0xbd, 0xd7, + 0x2d, 0xcf, 0xdd, 0xaf, 0xc5, 0x14, 0xa7, 0x69, 0x01, 0x4a, 0x3a, 0xc2, 0x47, 0x60, 0x8e, 0xd3, + 0x06, 0xf1, 0xc4, 0xd5, 0x11, 0xc6, 0x59, 0x71, 0x52, 0x96, 0xf1, 0xca, 0x88, 0x32, 0x1e, 0xc6, + 0x6c, 0xcd, 0xcb, 0xaa, 0x92, 0x73, 0x71, 0x29, 0x43, 0x49, 0x40, 0xb8, 0x09, 0x16, 0x83, 0xb0, + 0x2e, 0x0c, 0x11, 0xbf, 0x75, 0xd4, 0x74, 0xd8, 0x49, 0x71, 0x4a, 0x1e, 0xf6, 0x72, 0xaf, 0x5b, + 0x5e, 0x44, 0x69, 0x25, 0x1a, 0xb6, 0xd7, 0x7f, 0xd6, 0xc0, 0xd4, 0x66, 0x6d, 0x7b, 0x8f, 0xda, + 0xe4, 
0x02, 0x66, 0x71, 0x2b, 0x31, 0x8b, 0xfa, 0xe8, 0x96, 0x16, 0xf9, 0x8c, 0x9c, 0xc4, 0x7f, + 0xc3, 0x49, 0x14, 0x36, 0x8a, 0x45, 0x56, 0x41, 0xde, 0xc3, 0x2e, 0x91, 0x59, 0xcf, 0x44, 0x3e, + 0x7b, 0xd8, 0x25, 0x48, 0x6a, 0xe0, 0x9b, 0x60, 0xd2, 0xa3, 0x36, 0xd9, 0xde, 0x92, 0xb1, 0x67, + 0xcc, 0x4b, 0xca, 0x66, 0x72, 0x4f, 0x4a, 0x91, 0xd2, 0xc2, 0x5b, 0x60, 0x96, 0x53, 0x9f, 0x36, + 0x69, 0xbd, 0xf3, 0x11, 0xe9, 0xf4, 0x9b, 0x73, 0xa1, 0xd7, 0x2d, 0xcf, 0x1e, 0xc6, 0xe4, 0x28, + 0x61, 0x05, 0x3f, 0x07, 0x05, 0xdc, 0x6c, 0x52, 0x0b, 0x73, 0x7c, 0xd4, 0x24, 0xb2, 0xe3, 0x0a, + 0x37, 0xaf, 0x8d, 0x38, 0x5e, 0xd8, 0xcc, 0x22, 0x2e, 0x22, 0x8c, 0xb6, 0x02, 0x8b, 0x30, 0x73, + 0xbe, 0xd7, 0x2d, 0x17, 0x36, 0x22, 0x08, 0x14, 0xc7, 0xd3, 0x7f, 0xd2, 0x40, 0x41, 0x1d, 0xf8, + 0x02, 0x88, 0x67, 0x33, 0x49, 0x3c, 0xa5, 0xb3, 0xab, 0x34, 0x82, 0x76, 0xbe, 0x18, 0x64, 0x2c, + 0x39, 0x67, 0x1f, 0x4c, 0xd9, 0xb2, 0x54, 0xac, 0xa8, 0x49, 0xd4, 0xab, 0x67, 0xa3, 0x2a, 0x4a, + 0x9b, 0x57, 0xd8, 0x53, 0xe1, 0x37, 0x43, 0x7d, 0x14, 0xfd, 0xdb, 0x49, 0x30, 0xdb, 0x9f, 0xe6, + 0x26, 0x66, 0xec, 0x02, 0x9a, 0xf7, 0x7d, 0x50, 0xf0, 0x03, 0xda, 0x76, 0x98, 0x43, 0x3d, 0x12, + 0xa8, 0x3e, 0x5a, 0x52, 0x2e, 0x85, 0x83, 0x48, 0x85, 0xe2, 0x76, 0xb0, 0x0e, 0x80, 0x8f, 0x03, + 0xec, 0x12, 0x2e, 0x4e, 0x9f, 0x93, 0xa7, 0x7f, 0x6f, 0xc4, 0xe9, 0xe3, 0x27, 0x32, 0x0e, 0x06, + 0x5e, 0x55, 0x8f, 0x07, 0x9d, 0x28, 0xbb, 0x48, 0x81, 0x62, 0xd0, 0xb0, 0x01, 0xe6, 0x02, 0x62, + 0x35, 0xb1, 0xe3, 0x2a, 0xee, 0xca, 0xcb, 0x0c, 0xab, 0x82, 0x48, 0x50, 0x5c, 0x71, 0xda, 0x2d, + 0xdf, 0x18, 0x7e, 0x47, 0x18, 0x07, 0x24, 0x60, 0x0e, 0xe3, 0xc4, 0xe3, 0x61, 0x87, 0x26, 0x7c, + 0x50, 0x12, 0x5b, 0xcc, 0x89, 0x2b, 0x58, 0x7d, 0xdf, 0xe7, 0x0e, 0xf5, 0x58, 0x71, 0x22, 0x9a, + 0x93, 0xdd, 0x98, 0x1c, 0x25, 0xac, 0xe0, 0x0e, 0x58, 0x16, 0x7d, 0xfd, 0x65, 0x18, 0xa0, 0xfa, + 0xd4, 0xc7, 0x9e, 0xb8, 0xa5, 0xe2, 0xa4, 0x64, 0xad, 0xa2, 0x58, 0x01, 0x1b, 0x19, 0x7a, 0x94, + 0xe9, 0x05, 0x3f, 0x01, 0x8b, 0xe1, 0x0e, 0x30, 0x1d, 0xcf, 0x76, 0xbc, 0xba, 0xd8, 0x00, 0x92, + 0x00, 0x67, 0xcc, 0x6b, 0x82, 0x00, 0x1f, 0xa6, 0x95, 0xa7, 0x59, 0x42, 0x34, 0x0c, 0x02, 0x9f, + 0x80, 0x45, 0x19, 0x91, 0xd8, 0x6a, 0xe8, 0x1d, 0xc2, 0x8a, 0xd3, 0xb2, 0x74, 0x6b, 0xf1, 0xd2, + 0x89, 0xab, 0x0b, 0xd9, 0x3b, 0x24, 0x83, 0x1a, 0x69, 0x12, 0x8b, 0xd3, 0xe0, 0x90, 0x04, 0xae, + 0xf9, 0x86, 0xaa, 0xd7, 0xe2, 0x46, 0x1a, 0x0a, 0x0d, 0xa3, 0xaf, 0xdc, 0x03, 0xf3, 0xa9, 0x82, + 0xc3, 0x05, 0x90, 0x6b, 0x90, 0x4e, 0x48, 0x6a, 0x48, 0xfc, 0x84, 0xcb, 0x60, 0xa2, 0x8d, 0x9b, + 0x2d, 0x12, 0x36, 0x1f, 0x0a, 0x3f, 0xee, 0x8e, 0xdf, 0xd1, 0xf4, 0xdf, 0x34, 0xb0, 0x10, 0xef, + 0x9e, 0x0b, 0xe0, 0x89, 0x07, 0x49, 0x9e, 0xb8, 0x72, 0x8e, 0x9e, 0x1e, 0x41, 0x16, 0x5f, 0x6b, + 0x60, 0x36, 0xbe, 0xea, 0xe0, 0xbb, 0x60, 0x1a, 0xb7, 0x6c, 0x87, 0x78, 0x56, 0x9f, 0xd3, 0x07, + 0x89, 0x6c, 0x28, 0x39, 0x1a, 0x58, 0x88, 0x45, 0x48, 0x9e, 0xfa, 0x4e, 0x80, 0x45, 0x93, 0xd5, + 0x88, 0x45, 0x3d, 0x9b, 0xc9, 0x1b, 0xca, 0x85, 0x8b, 0xb0, 0x9a, 0x56, 0xa2, 0x61, 0x7b, 0xfd, + 0x87, 0x71, 0xb0, 0x10, 0xf6, 0x46, 0xf8, 0x04, 0x72, 0x89, 0xc7, 0x2f, 0x80, 0x54, 0x76, 0x13, + 0x1b, 0xf1, 0x9d, 0x33, 0x57, 0x46, 0x94, 0xd8, 0xa8, 0xd5, 0x08, 0x3f, 0x06, 0x93, 0x8c, 0x63, + 0xde, 0x12, 0x44, 0x23, 0x00, 0xaf, 0x9f, 0x17, 0x50, 0x3a, 0x45, 0x5b, 0x31, 0xfc, 0x46, 0x0a, + 0x4c, 0xff, 0x5d, 0x03, 0xcb, 0x69, 0x97, 0x0b, 0xe8, 0xb0, 0x9d, 0x64, 0x87, 0xbd, 0x75, 0xce, + 0xc3, 0x8c, 0xe8, 0xb2, 0x3f, 0x35, 0xf0, 0xbf, 0xa1, 0x73, 0xcb, 0xfd, 0x2b, 0x78, 0xc9, 0x4f, + 0xb1, 0xdf, 0x5e, 0xf4, 0x9e, 
0x90, 0xbc, 0x74, 0x90, 0xa1, 0x47, 0x99, 0x5e, 0xf0, 0x31, 0x58, + 0x70, 0xbc, 0xa6, 0xe3, 0x91, 0x50, 0x56, 0x8b, 0xea, 0x9b, 0x49, 0x1e, 0x69, 0x64, 0x59, 0xdc, + 0xe5, 0x5e, 0xb7, 0xbc, 0xb0, 0x9d, 0x42, 0x41, 0x43, 0xb8, 0xfa, 0x1f, 0x19, 0x95, 0x91, 0x1b, + 0x57, 0x8c, 0x90, 0x94, 0x90, 0x60, 0x68, 0x84, 0x94, 0x1c, 0x0d, 0x2c, 0x64, 0xdf, 0xc8, 0xab, + 0x50, 0x89, 0x9e, 0xbb, 0x6f, 0xa4, 0x53, 0xac, 0x6f, 0xe4, 0x37, 0x52, 0x60, 0x22, 0x09, 0xf1, + 0xae, 0x92, 0x77, 0x99, 0x4b, 0x26, 0xb1, 0xa7, 0xe4, 0x68, 0x60, 0xa1, 0xff, 0x93, 0xcb, 0x28, + 0x90, 0x6c, 0xc0, 0xd8, 0x69, 0xfa, 0xff, 0x56, 0xd2, 0xa7, 0xb1, 0x07, 0xa7, 0xb1, 0xe1, 0xf7, + 0x1a, 0x80, 0x78, 0x00, 0xb1, 0xdb, 0x6f, 0xd0, 0xb0, 0x8b, 0xaa, 0xaf, 0x35, 0x12, 0xc6, 0xc6, + 0x10, 0x4e, 0xb8, 0x8d, 0x57, 0x54, 0x7c, 0x38, 0x6c, 0x80, 0x32, 0x82, 0x43, 0x1b, 0x14, 0x42, + 0x69, 0x35, 0x08, 0x68, 0xa0, 0xc6, 0x53, 0x3f, 0x33, 0x17, 0x69, 0x69, 0x96, 0xe4, 0xd3, 0x30, + 0x72, 0x3d, 0xed, 0x96, 0x0b, 0x31, 0x3d, 0x8a, 0xc3, 0x8a, 0x28, 0x36, 0x89, 0xa2, 0xe4, 0x5f, + 0x2f, 0xca, 0x16, 0x19, 0x1d, 0x25, 0x06, 0xbb, 0x52, 0x05, 0xff, 0x1f, 0x71, 0x2d, 0xaf, 0xb5, + 0xb3, 0xbe, 0xd1, 0x40, 0x3c, 0x06, 0xdc, 0x01, 0x79, 0xee, 0xa8, 0xa9, 0x4b, 0x3e, 0x9f, 0xcf, + 0x20, 0x92, 0x43, 0xc7, 0x25, 0x11, 0x15, 0x8a, 0x2f, 0x24, 0x51, 0xe0, 0xdb, 0x60, 0xca, 0x25, + 0x8c, 0xe1, 0xba, 0x8a, 0x1c, 0x3d, 0x26, 0x77, 0x43, 0x31, 0xea, 0xeb, 0xf5, 0xdb, 0x60, 0x29, + 0xe3, 0x51, 0x0e, 0xcb, 0x60, 0xc2, 0x92, 0xff, 0x71, 0x45, 0x42, 0x13, 0xe6, 0x8c, 0x60, 0x94, + 0x4d, 0xf9, 0xd7, 0x36, 0x94, 0x9b, 0x6b, 0xcf, 0x5e, 0x96, 0xc6, 0x9e, 0xbf, 0x2c, 0x8d, 0xbd, + 0x78, 0x59, 0x1a, 0xfb, 0xaa, 0x57, 0xd2, 0x9e, 0xf5, 0x4a, 0xda, 0xf3, 0x5e, 0x49, 0x7b, 0xd1, + 0x2b, 0x69, 0x7f, 0xf5, 0x4a, 0xda, 0x77, 0x7f, 0x97, 0xc6, 0x3e, 0x1d, 0x6f, 0xaf, 0xff, 0x17, + 0x00, 0x00, 0xff, 0xff, 0x02, 0xb2, 0x6f, 0xe2, 0x3e, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +756,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1166,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1595,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1736,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1902,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2022,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2399,6 +2532,61 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = append(m.TokenRequests, TokenRequest{}) + if err := m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: 
iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3452,6 +3640,111 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } return nil } +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index a3526ca4e..d6b4d9cbd 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1; @@ -146,6 +146,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. 
To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // CSINode holds information about all CSI drivers installed on a node. @@ -281,6 +318,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 27e06debb..18d0fb877 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -344,6 +344,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. 
+ // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -381,6 +418,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 606cda4db..0e28b1d2f 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 5eb0225a0..f4de94216 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. 
+func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 40a764051..d64345333 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1alpha1; diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index cec77515e..21f664094 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} + return fileDescriptor_7d2980599fd0de80, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} + return fileDescriptor_7d2980599fd0de80, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} + return fileDescriptor_7d2980599fd0de80, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_7d2980599fd0de80, []int{12} + return fileDescriptor_7d2980599fd0de80, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} + return fileDescriptor_7d2980599fd0de80, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} + return fileDescriptor_7d2980599fd0de80, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} + return fileDescriptor_7d2980599fd0de80, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1beta1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") @@ -520,95 +549,102 @@ func init() { } var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x3d, 0x6f, 0xdb, 0x46, - 0x1f, 0x37, 0x2d, 0xc9, 0x2f, 0x27, 0x3b, 0x96, 0xcf, 0xc6, 0xf3, 0xe8, 0xd1, 0x20, 0x1a, 0x7a, - 0xd0, 0xc6, 0x09, 0x12, 0x2a, 0x31, 0xd2, 0x20, 0x08, 0x90, 0xc1, 0x72, 0xdc, 0x46, 0x89, 0xe5, - 0xb8, 0x27, 0x23, 0x28, 0x82, 0x0e, 0x3d, 0x91, 0x67, 0x99, 0xb1, 0xc8, 0x63, 0xc8, 0x93, 0x5a, - 0x6d, 0x9d, 0x3a, 0x17, 0x1d, 0xfa, 0x09, 0xfa, 0x15, 0x5a, 0xa0, 0x5d, 0x3a, 0x36, 0x53, 0x11, - 0x74, 0xca, 0x44, 0x34, 0xec, 0x47, 0x28, 0xba, 0x18, 0x1d, 0x8a, 0x3b, 0x9e, 0xc4, 0x17, 0x51, - 0xb1, 0xdd, 0xc1, 0x1b, 0xef, 0xff, 0xf2, 0xfb, 0xbf, 0xff, 0xef, 0x08, 0x76, 0x4e, 0xee, 0x79, - 0x9a, 0x49, 0xeb, 0x27, 0xfd, 0x0e, 0x71, 0x6d, 0xc2, 0x88, 0x57, 0x1f, 0x10, 0xdb, 0xa0, 0x6e, - 0x5d, 0x32, 0xb0, 0x63, 0xd6, 0x3d, 0x46, 0x5d, 0xdc, 0x25, 0xf5, 0xc1, 0xed, 0x0e, 0x61, 0xf8, - 0x76, 0xbd, 0x4b, 0x6c, 0xe2, 0x62, 0x46, 0x0c, 0xcd, 0x71, 0x29, 0xa3, 0xb0, 0x12, 0xca, 0x6a, - 0xd8, 0x31, 0x35, 0x29, 0xab, 0x49, 0xd9, 0xca, 0xcd, 0xae, 0xc9, 0x8e, 0xfb, 0x1d, 0x4d, 0xa7, - 0x56, 0xbd, 0x4b, 0xbb, 0xb4, 0x2e, 0x54, 0x3a, 0xfd, 0x23, 0x71, 0x12, 0x07, 0xf1, 0x15, 0x42, - 0x55, 0x6a, 0x31, 0xb3, 0x3a, 
0x75, 0xb9, 0xcd, 0xb4, 0xb9, 0xca, 0x9d, 0x48, 0xc6, 0xc2, 0xfa, - 0xb1, 0x69, 0x13, 0x77, 0x58, 0x77, 0x4e, 0xba, 0x9c, 0xe0, 0xd5, 0x2d, 0xc2, 0x70, 0x96, 0x56, - 0x7d, 0x9a, 0x96, 0xdb, 0xb7, 0x99, 0x69, 0x91, 0x09, 0x85, 0xbb, 0x67, 0x29, 0x78, 0xfa, 0x31, - 0xb1, 0x70, 0x5a, 0xaf, 0xf6, 0x93, 0x02, 0x16, 0x77, 0xda, 0xcd, 0x87, 0xae, 0x39, 0x20, 0x2e, - 0xfc, 0x0c, 0x2c, 0x70, 0x8f, 0x0c, 0xcc, 0x70, 0x59, 0xd9, 0x50, 0x36, 0x8b, 0x5b, 0xb7, 0xb4, - 0x28, 0x5d, 0x63, 0x60, 0xcd, 0x39, 0xe9, 0x72, 0x82, 0xa7, 0x71, 0x69, 0x6d, 0x70, 0x5b, 0x7b, - 0xda, 0x79, 0x41, 0x74, 0xd6, 0x22, 0x0c, 0x37, 0xe0, 0x2b, 0x5f, 0x9d, 0x09, 0x7c, 0x15, 0x44, - 0x34, 0x34, 0x46, 0x85, 0x4f, 0x40, 0xde, 0x73, 0x88, 0x5e, 0x9e, 0x15, 0xe8, 0xd7, 0xb4, 0xe9, - 0xc5, 0xd0, 0xc6, 0x6e, 0xb5, 0x1d, 0xa2, 0x37, 0x96, 0x24, 0x6c, 0x9e, 0x9f, 0x90, 0x00, 0xa9, - 0xfd, 0xa8, 0x80, 0xe5, 0xb1, 0xd4, 0x9e, 0xe9, 0x31, 0xf8, 0xe9, 0x44, 0x00, 0xda, 0xf9, 0x02, - 0xe0, 0xda, 0xc2, 0xfd, 0x92, 0xb4, 0xb3, 0x30, 0xa2, 0xc4, 0x9c, 0x7f, 0x0c, 0x0a, 0x26, 0x23, - 0x96, 0x57, 0x9e, 0xdd, 0xc8, 0x6d, 0x16, 0xb7, 0xde, 0x3b, 0x97, 0xf7, 0x8d, 0x65, 0x89, 0x58, - 0x68, 0x72, 0x5d, 0x14, 0x42, 0xd4, 0xfe, 0x9a, 0x8d, 0xf9, 0xce, 0x63, 0x82, 0xf7, 0xc1, 0x15, - 0xcc, 0x18, 0xd6, 0x8f, 0x11, 0x79, 0xd9, 0x37, 0x5d, 0x62, 0x88, 0x08, 0x16, 0x1a, 0x30, 0xf0, - 0xd5, 0x2b, 0xdb, 0x09, 0x0e, 0x4a, 0x49, 0x72, 0x5d, 0x87, 0x1a, 0x4d, 0xfb, 0x88, 0x3e, 0xb5, - 0x5b, 0xb4, 0x6f, 0x33, 0x91, 0x60, 0xa9, 0x7b, 0x90, 0xe0, 0xa0, 0x94, 0x24, 0xd4, 0xc1, 0xfa, - 0x80, 0xf6, 0xfa, 0x16, 0xd9, 0x33, 0x8f, 0x88, 0x3e, 0xd4, 0x7b, 0xa4, 0x45, 0x0d, 0xe2, 0x95, - 0x73, 0x1b, 0xb9, 0xcd, 0xc5, 0x46, 0x3d, 0xf0, 0xd5, 0xf5, 0x67, 0x19, 0xfc, 0x53, 0x5f, 0x5d, - 0xcb, 0xa0, 0xa3, 0x4c, 0x30, 0xf8, 0x00, 0xac, 0xc8, 0x0c, 0xed, 0x60, 0x07, 0xeb, 0x26, 0x1b, - 0x96, 0xf3, 0xc2, 0xc3, 0xb5, 0xc0, 0x57, 0x57, 0xda, 0x49, 0x16, 0x4a, 0xcb, 0xc2, 0x47, 0x60, - 0xf9, 0xc8, 0xfb, 0xc8, 0xa5, 0x7d, 0xe7, 0x80, 0xf6, 0x4c, 0x7d, 0x58, 0x2e, 0x6c, 0x28, 0x9b, - 0x8b, 0x8d, 0x5a, 0xe0, 0xab, 0xcb, 0x1f, 0xb6, 0x63, 0x8c, 0xd3, 0x34, 0x01, 0x25, 0x15, 0x6b, - 0x3f, 0x28, 0x60, 0x7e, 0xa7, 0xdd, 0xdc, 0xa7, 0x06, 0xb9, 0x84, 0x76, 0x6f, 0x26, 0xda, 0xfd, - 0xea, 0x19, 0x0d, 0xc3, 0x9d, 0x9a, 0xda, 0xec, 0x7f, 0x86, 0xcd, 0xce, 0x65, 0xe4, 0xb4, 0x6e, - 0x80, 0xbc, 0x8d, 0x2d, 0x22, 0x5c, 0x5f, 0x8c, 0x74, 0xf6, 0xb1, 0x45, 0x90, 0xe0, 0xc0, 0xf7, - 0xc1, 0x9c, 0x4d, 0x0d, 0xd2, 0x7c, 0x28, 0x1c, 0x58, 0x6c, 0x5c, 0x91, 0x32, 0x73, 0xfb, 0x82, - 0x8a, 0x24, 0x17, 0xde, 0x01, 0x4b, 0x8c, 0x3a, 0xb4, 0x47, 0xbb, 0xc3, 0x27, 0x64, 0x38, 0x2a, - 0x7d, 0x29, 0xf0, 0xd5, 0xa5, 0xc3, 0x18, 0x1d, 0x25, 0xa4, 0x60, 0x07, 0x14, 0x71, 0xaf, 0x47, - 0x75, 0xcc, 0x70, 0xa7, 0x47, 0x44, 0x3d, 0x8b, 0x5b, 0xf5, 0x77, 0xc5, 0x18, 0xf6, 0x0b, 0x37, - 0x8e, 0x88, 0x47, 0xfb, 0xae, 0x4e, 0xbc, 0xc6, 0x4a, 0xe0, 0xab, 0xc5, 0xed, 0x08, 0x07, 0xc5, - 0x41, 0x6b, 0xdf, 0x2b, 0xa0, 0x28, 0xa3, 0xbe, 0x84, 0x01, 0x7f, 0x94, 0x1c, 0xf0, 0xff, 0x9f, - 0xa3, 0x5e, 0x53, 0xc6, 0x5b, 0x1f, 0xbb, 0x2d, 0x66, 0xfb, 0x10, 0xcc, 0x1b, 0xa2, 0x68, 0x5e, - 0x59, 0x11, 0xd0, 0xd7, 0xce, 0x01, 0x2d, 0xf7, 0xc7, 0x8a, 0x34, 0x30, 0x1f, 0x9e, 0x3d, 0x34, - 0x82, 0xaa, 0x7d, 0x33, 0x07, 0x96, 0x46, 0xa3, 0xd3, 0xc3, 0x9e, 0x77, 0x09, 0x0d, 0xfd, 0x01, - 0x28, 0x3a, 0x2e, 0x1d, 0x98, 0x9e, 0x49, 0x6d, 0xe2, 0xca, 0xb6, 0x5a, 0x93, 0x2a, 0xc5, 0x83, - 0x88, 0x85, 0xe2, 0x72, 0xb0, 0x07, 0x80, 0x83, 0x5d, 0x6c, 0x11, 0xc6, 0x53, 0x90, 0x13, 0x29, - 0xb8, 0xf7, 0xae, 0x14, 0xc4, 0xc3, 0xd2, 0x0e, 0xc6, 
0xaa, 0xbb, 0x36, 0x73, 0x87, 0x91, 0x8b, - 0x11, 0x03, 0xc5, 0xf0, 0xe1, 0x09, 0x58, 0x76, 0x89, 0xde, 0xc3, 0xa6, 0x25, 0xb7, 0x45, 0x5e, - 0xb8, 0xb9, 0xcb, 0xb7, 0x05, 0x8a, 0x33, 0x4e, 0x7d, 0xf5, 0xd6, 0xe4, 0x1d, 0xae, 0x1d, 0x10, - 0xd7, 0x33, 0x3d, 0x46, 0x6c, 0x16, 0x36, 0x6c, 0x42, 0x07, 0x25, 0xb1, 0xf9, 0xec, 0x58, 0x7c, - 0x8f, 0x3e, 0x75, 0x98, 0x49, 0x6d, 0xaf, 0x5c, 0x88, 0x66, 0xa7, 0x15, 0xa3, 0xa3, 0x84, 0x14, - 0xdc, 0x03, 0xeb, 0xbc, 0xcd, 0x3f, 0x0f, 0x0d, 0xec, 0x7e, 0xe1, 0x60, 0x9b, 0xa7, 0xaa, 0x3c, - 0x27, 0x96, 0x62, 0x99, 0x2f, 0xdd, 0xed, 0x0c, 0x3e, 0xca, 0xd4, 0x82, 0x9f, 0x80, 0xd5, 0x70, - 0xeb, 0x36, 0x4c, 0xdb, 0x30, 0xed, 0x2e, 0xdf, 0xb9, 0xe5, 0x79, 0x11, 0xf4, 0xf5, 0xc0, 0x57, - 0x57, 0x9f, 0xa5, 0x99, 0xa7, 0x59, 0x44, 0x34, 0x09, 0x02, 0x5f, 0x82, 0x55, 0x61, 0x91, 0x18, - 0x72, 0x11, 0x98, 0xc4, 0x2b, 0x2f, 0x88, 0xfa, 0x6d, 0xc6, 0xeb, 0xc7, 0x53, 0xc7, 0x1b, 0x69, - 0xb4, 0x2e, 0xda, 0xa4, 0x47, 0x74, 0x46, 0xdd, 0x43, 0xe2, 0x5a, 0x8d, 0xff, 0xc9, 0x7a, 0xad, - 0x6e, 0xa7, 0xa1, 0xd0, 0x24, 0x7a, 0xe5, 0x01, 0x58, 0x49, 0x15, 0x1c, 0x96, 0x40, 0xee, 0x84, - 0x0c, 0xc3, 0x45, 0x87, 0xf8, 0x27, 0x5c, 0x07, 0x85, 0x01, 0xee, 0xf5, 0x49, 0xd8, 0x81, 0x28, - 0x3c, 0xdc, 0x9f, 0xbd, 0xa7, 0xd4, 0x7e, 0x56, 0x40, 0x29, 0xde, 0x3d, 0x97, 0xb0, 0x36, 0x5a, - 0xc9, 0xb5, 0xb1, 0x79, 0xde, 0xc6, 0x9e, 0xb2, 0x3b, 0xbe, 0x9b, 0x05, 0xa5, 0xb0, 0x38, 0xe1, - 0xad, 0x6f, 0x11, 0x9b, 0x5d, 0xc2, 0x68, 0xa3, 0xc4, 0x5d, 0x75, 0xeb, 0xec, 0x3d, 0x1e, 0x79, - 0x37, 0xed, 0xd2, 0x82, 0xcf, 0xc1, 0x9c, 0xc7, 0x30, 0xeb, 0xf3, 0x99, 0xe7, 0xa8, 0x5b, 0x17, - 0x42, 0x15, 0x9a, 0xd1, 0xa5, 0x15, 0x9e, 0x91, 0x44, 0xac, 0xfd, 0xa2, 0x80, 0xf5, 0xb4, 0xca, - 0x25, 0x14, 0xfb, 0xe3, 0x64, 0xb1, 0x6f, 0x5c, 0x24, 0xa2, 0x29, 0x05, 0xff, 0x4d, 0x01, 0xff, - 0x99, 0x08, 0x5e, 0x5c, 0x8f, 0x7c, 0x4f, 0x38, 0xa9, 0x6d, 0xb4, 0x1f, 0xdd, 0xf9, 0x62, 0x4f, - 0x1c, 0x64, 0xf0, 0x51, 0xa6, 0x16, 0x7c, 0x01, 0x4a, 0xa6, 0xdd, 0x33, 0x6d, 0x12, 0xd2, 0xda, - 0x51, 0xb9, 0x33, 0x87, 0x39, 0x8d, 0x2c, 0xca, 0xbc, 0x1e, 0xf8, 0x6a, 0xa9, 0x99, 0x42, 0x41, - 0x13, 0xb8, 0xb5, 0x5f, 0x33, 0xca, 0x23, 0xee, 0xc2, 0x1b, 0x60, 0x21, 0x7c, 0xbd, 0x12, 0x57, - 0x86, 0x31, 0x4e, 0xf7, 0xb6, 0xa4, 0xa3, 0xb1, 0x84, 0xe8, 0x20, 0x91, 0x0a, 0xe9, 0xe8, 0xc5, - 0x3a, 0x48, 0x68, 0xc6, 0x3a, 0x48, 0x9c, 0x91, 0x44, 0xe4, 0x9e, 0xf0, 0x07, 0x90, 0x48, 0x68, - 0x2e, 0xe9, 0xc9, 0xbe, 0xa4, 0xa3, 0xb1, 0x44, 0xed, 0xef, 0x5c, 0x46, 0x95, 0x44, 0x2b, 0xc6, - 0x42, 0x1a, 0x3d, 0xda, 0xd3, 0x21, 0x19, 0xe3, 0x90, 0x0c, 0xf8, 0xad, 0x02, 0x20, 0x1e, 0x43, - 0xb4, 0x46, 0xad, 0x1a, 0xf6, 0xd3, 0xe3, 0x8b, 0x4f, 0x88, 0xb6, 0x3d, 0x01, 0x16, 0xde, 0x93, - 0x15, 0xe9, 0x04, 0x9c, 0x14, 0x40, 0x19, 0x1e, 0x40, 0x13, 0x14, 0x43, 0xea, 0xae, 0xeb, 0x52, - 0x57, 0x8e, 0xec, 0xd5, 0xb3, 0x1d, 0x12, 0xe2, 0x8d, 0xaa, 0x78, 0xc8, 0x45, 0xfa, 0xa7, 0xbe, - 0x5a, 0x8c, 0xf1, 0x51, 0x1c, 0x9b, 0x9b, 0x32, 0x48, 0x64, 0x2a, 0xff, 0x2f, 0x4c, 0x3d, 0x24, - 0xd3, 0x4d, 0xc5, 0xb0, 0x2b, 0xbb, 0xe0, 0xbf, 0x53, 0x12, 0x74, 0xa1, 0x7b, 0xe5, 0x2b, 0x05, - 0xc4, 0x6d, 0xc0, 0x3d, 0x90, 0xe7, 0x3f, 0xd6, 0x72, 0xc3, 0x5c, 0x3f, 0xdf, 0x86, 0x39, 0x34, - 0x2d, 0x12, 0x2d, 0x4a, 0x7e, 0x42, 0x02, 0x05, 0x5e, 0x03, 0xf3, 0x16, 0xf1, 0x3c, 0xdc, 0x95, - 0x96, 0xa3, 0x57, 0x5f, 0x2b, 0x24, 0xa3, 0x11, 0xbf, 0x76, 0x17, 0xac, 0x65, 0xbc, 0xa3, 0xa1, - 0x0a, 0x0a, 0xba, 0xf8, 0xf3, 0xe3, 0x0e, 0x15, 0x1a, 0x8b, 0x7c, 0xcb, 0xec, 0x88, 0x1f, 0xbe, - 0x90, 0xde, 0xb8, 0xf9, 0xea, 0x6d, 0x75, 0xe6, 0xf5, 0xdb, 0xea, 0xcc, 0x9b, 
0xb7, 0xd5, 0x99, - 0x2f, 0x83, 0xaa, 0xf2, 0x2a, 0xa8, 0x2a, 0xaf, 0x83, 0xaa, 0xf2, 0x26, 0xa8, 0x2a, 0xbf, 0x07, - 0x55, 0xe5, 0xeb, 0x3f, 0xaa, 0x33, 0xcf, 0xe7, 0x65, 0xbe, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, - 0x4b, 0x3f, 0x49, 0x6e, 0x6d, 0x11, 0x00, 0x00, + // 1508 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0x1b, 0x47, + 0x16, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0x7c, 0xc7, 0x53, 0x41, 0x0a, 0x3c, 0xdc, + 0x59, 0x36, 0xec, 0xa5, 0x2d, 0xf8, 0x0c, 0xc3, 0x80, 0x0b, 0xad, 0xac, 0x3b, 0xcb, 0x96, 0x64, + 0xdd, 0x50, 0x30, 0x0e, 0xc6, 0x15, 0x19, 0xee, 0x3e, 0x51, 0x6b, 0x71, 0x77, 0xd6, 0x3b, 0x43, + 0xc5, 0xec, 0x92, 0x26, 0x75, 0x90, 0x22, 0x7d, 0x80, 0xfc, 0x0b, 0x09, 0x90, 0x34, 0x29, 0xe3, + 0x2a, 0x30, 0x52, 0xb9, 0x22, 0x62, 0xe6, 0x4f, 0x48, 0x27, 0xa4, 0x08, 0x66, 0x76, 0xc8, 0xfd, + 0x20, 0x69, 0x49, 0x29, 0xd4, 0x71, 0xde, 0xc7, 0xef, 0xbd, 0x79, 0xef, 0xcd, 0xef, 0x2d, 0xd1, + 0xe6, 0xf1, 0x7d, 0x6e, 0xba, 0xac, 0x7a, 0xdc, 0xaa, 0x43, 0xe8, 0x83, 0x00, 0x5e, 0x3d, 0x01, + 0xdf, 0x61, 0x61, 0x55, 0x2b, 0x68, 0xe0, 0x56, 0xb9, 0x60, 0x21, 0x6d, 0x40, 0xf5, 0xe4, 0x4e, + 0x1d, 0x04, 0xbd, 0x53, 0x6d, 0x80, 0x0f, 0x21, 0x15, 0xe0, 0x98, 0x41, 0xc8, 0x04, 0xc3, 0x2b, + 0x91, 0xad, 0x49, 0x03, 0xd7, 0xd4, 0xb6, 0xa6, 0xb6, 0x5d, 0xb9, 0xd5, 0x70, 0xc5, 0x51, 0xab, + 0x6e, 0xda, 0xcc, 0xab, 0x36, 0x58, 0x83, 0x55, 0x95, 0x4b, 0xbd, 0x75, 0xa8, 0x4e, 0xea, 0xa0, + 0x7e, 0x45, 0x50, 0x2b, 0x95, 0x44, 0x58, 0x9b, 0x85, 0x32, 0x66, 0x36, 0xdc, 0xca, 0xdd, 0xd8, + 0xc6, 0xa3, 0xf6, 0x91, 0xeb, 0x43, 0xd8, 0xae, 0x06, 0xc7, 0x0d, 0x29, 0xe0, 0x55, 0x0f, 0x04, + 0x1d, 0xe6, 0x55, 0x1d, 0xe5, 0x15, 0xb6, 0x7c, 0xe1, 0x7a, 0x30, 0xe0, 0x70, 0xef, 0x2c, 0x07, + 0x6e, 0x1f, 0x81, 0x47, 0xb3, 0x7e, 0x95, 0xef, 0x0d, 0x34, 0xb3, 0x59, 0xdb, 0x7e, 0x14, 0xba, + 0x27, 0x10, 0xe2, 0x8f, 0xd0, 0xb4, 0xcc, 0xc8, 0xa1, 0x82, 0x16, 0x8d, 0x55, 0x63, 0xad, 0xb0, + 0x7e, 0xdb, 0x8c, 0xcb, 0xd5, 0x07, 0x36, 0x83, 0xe3, 0x86, 0x14, 0x70, 0x53, 0x5a, 0x9b, 0x27, + 0x77, 0xcc, 0x67, 0xf5, 0x97, 0x60, 0x8b, 0x5d, 0x10, 0xd4, 0xc2, 0x6f, 0x3a, 0xe5, 0xb1, 0x6e, + 0xa7, 0x8c, 0x62, 0x19, 0xe9, 0xa3, 0xe2, 0xa7, 0x28, 0xcf, 0x03, 0xb0, 0x8b, 0xe3, 0x0a, 0xfd, + 0xba, 0x39, 0xba, 0x19, 0x66, 0x3f, 0xad, 0x5a, 0x00, 0xb6, 0x35, 0xab, 0x61, 0xf3, 0xf2, 0x44, + 0x14, 0x48, 0xe5, 0x3b, 0x03, 0xcd, 0xf5, 0xad, 0x76, 0x5c, 0x2e, 0xf0, 0xff, 0x07, 0x2e, 0x60, + 0x9e, 0xef, 0x02, 0xd2, 0x5b, 0xa5, 0xbf, 0xa0, 0xe3, 0x4c, 0xf7, 0x24, 0x89, 0xe4, 0x9f, 0xa0, + 0x09, 0x57, 0x80, 0xc7, 0x8b, 0xe3, 0xab, 0xb9, 0xb5, 0xc2, 0xfa, 0x3f, 0xce, 0x95, 0xbd, 0x35, + 0xa7, 0x11, 0x27, 0xb6, 0xa5, 0x2f, 0x89, 0x20, 0x2a, 0x5f, 0xe5, 0x13, 0xb9, 0xcb, 0x3b, 0xe1, + 0x07, 0xe8, 0x0a, 0x15, 0x82, 0xda, 0x47, 0x04, 0x5e, 0xb5, 0xdc, 0x10, 0x1c, 0x75, 0x83, 0x69, + 0x0b, 0x77, 0x3b, 0xe5, 0x2b, 0x1b, 0x29, 0x0d, 0xc9, 0x58, 0x4a, 0xdf, 0x80, 0x39, 0xdb, 0xfe, + 0x21, 0x7b, 0xe6, 0xef, 0xb2, 0x96, 0x2f, 0x54, 0x81, 0xb5, 0xef, 0x7e, 0x4a, 0x43, 0x32, 0x96, + 0xd8, 0x46, 0xcb, 0x27, 0xac, 0xd9, 0xf2, 0x60, 0xc7, 0x3d, 0x04, 0xbb, 0x6d, 0x37, 0x61, 0x97, + 0x39, 0xc0, 0x8b, 0xb9, 0xd5, 0xdc, 0xda, 0x8c, 0x55, 0xed, 0x76, 0xca, 0xcb, 0xcf, 0x87, 0xe8, + 0x4f, 0x3b, 0xe5, 0xa5, 0x21, 0x72, 0x32, 0x14, 0x0c, 0x3f, 0x44, 0xf3, 0xba, 0x42, 0x9b, 0x34, + 0xa0, 0xb6, 0x2b, 0xda, 0xc5, 0xbc, 0xca, 0x70, 0xa9, 0xdb, 0x29, 0xcf, 0xd7, 0xd2, 0x2a, 0x92, + 0xb5, 0xc5, 0x8f, 0xd1, 0xdc, 0x21, 0xff, 0x4f, 0xc8, 0x5a, 0xc1, 0x3e, 0x6b, 0xba, 0x76, 0xbb, + 0x38, 
0xb1, 0x6a, 0xac, 0xcd, 0x58, 0x95, 0x6e, 0xa7, 0x3c, 0xf7, 0xef, 0x5a, 0x42, 0x71, 0x9a, + 0x15, 0x90, 0xb4, 0x23, 0x06, 0x34, 0x27, 0xd8, 0x31, 0xf8, 0xb2, 0x74, 0xc0, 0x05, 0x2f, 0x4e, + 0xaa, 0x5e, 0xae, 0x7d, 0xa8, 0x97, 0x07, 0x09, 0x07, 0xeb, 0xaa, 0x6e, 0xe7, 0x5c, 0x52, 0xca, + 0x49, 0x1a, 0x15, 0x6f, 0xa2, 0xc5, 0x30, 0x6a, 0x0e, 0x27, 0x10, 0xb4, 0xea, 0x4d, 0x97, 0x1f, + 0x15, 0xa7, 0xd4, 0x8d, 0xaf, 0x76, 0x3b, 0xe5, 0x45, 0x92, 0x55, 0x92, 0x41, 0xfb, 0xca, 0xb7, + 0x06, 0x9a, 0xda, 0xac, 0x6d, 0xef, 0x31, 0x07, 0x2e, 0xe1, 0x69, 0x6e, 0xa7, 0x9e, 0xe6, 0xb5, + 0x33, 0x86, 0x5b, 0x26, 0x35, 0xf2, 0x61, 0xfe, 0x16, 0x3d, 0x4c, 0x69, 0xa3, 0x99, 0x65, 0x15, + 0xe5, 0x7d, 0xea, 0x81, 0x4a, 0x7d, 0x26, 0xf6, 0xd9, 0xa3, 0x1e, 0x10, 0xa5, 0xc1, 0xff, 0x44, + 0x93, 0x3e, 0x73, 0x60, 0xfb, 0x91, 0x4a, 0x60, 0xc6, 0xba, 0xa2, 0x6d, 0x26, 0xf7, 0x94, 0x94, + 0x68, 0x2d, 0xbe, 0x8b, 0x66, 0x05, 0x0b, 0x58, 0x93, 0x35, 0xda, 0x4f, 0xa1, 0xdd, 0x1b, 0xd3, + 0x85, 0x6e, 0xa7, 0x3c, 0x7b, 0x90, 0x90, 0x93, 0x94, 0x15, 0xae, 0xa3, 0x02, 0x6d, 0x36, 0x99, + 0x4d, 0x05, 0xad, 0x37, 0x41, 0xcd, 0x5e, 0x61, 0xbd, 0xfa, 0xa1, 0x3b, 0x46, 0xb3, 0x2d, 0x83, + 0x13, 0xe0, 0xac, 0x15, 0xda, 0xc0, 0xad, 0xf9, 0x6e, 0xa7, 0x5c, 0xd8, 0x88, 0x71, 0x48, 0x12, + 0xb4, 0xf2, 0x8d, 0x81, 0x0a, 0xfa, 0xd6, 0x97, 0x40, 0x46, 0x8f, 0xd3, 0x64, 0xf4, 0xf7, 0x73, + 0xf4, 0x6b, 0x04, 0x15, 0xd9, 0xfd, 0xb4, 0x15, 0x0f, 0x1d, 0xa0, 0x29, 0x47, 0x35, 0x8d, 0x17, + 0x0d, 0x05, 0x7d, 0xfd, 0x1c, 0xd0, 0x9a, 0xeb, 0xe6, 0x75, 0x80, 0xa9, 0xe8, 0xcc, 0x49, 0x0f, + 0xaa, 0xf2, 0xc5, 0x24, 0x9a, 0xed, 0x3d, 0xf3, 0x26, 0xe5, 0xfc, 0x12, 0x06, 0xfa, 0x5f, 0xa8, + 0x10, 0x84, 0xec, 0xc4, 0xe5, 0x2e, 0xf3, 0x21, 0xd4, 0x63, 0xb5, 0xa4, 0x5d, 0x0a, 0xfb, 0xb1, + 0x8a, 0x24, 0xed, 0x70, 0x13, 0xa1, 0x80, 0x86, 0xd4, 0x03, 0x21, 0x4b, 0x90, 0x53, 0x25, 0xb8, + 0xff, 0xa1, 0x12, 0x24, 0xaf, 0x65, 0xee, 0xf7, 0x5d, 0xb7, 0x7c, 0x11, 0xb6, 0xe3, 0x14, 0x63, + 0x05, 0x49, 0xe0, 0xe3, 0x63, 0x34, 0x17, 0x82, 0xdd, 0xa4, 0xae, 0xa7, 0x99, 0x2d, 0xaf, 0xd2, + 0xdc, 0x92, 0x0c, 0x43, 0x92, 0x8a, 0xd3, 0x4e, 0xf9, 0xf6, 0xe0, 0xf7, 0x86, 0xb9, 0x0f, 0x21, + 0x77, 0xb9, 0x00, 0x5f, 0x44, 0x03, 0x9b, 0xf2, 0x21, 0x69, 0x6c, 0xf9, 0x76, 0x3c, 0xc9, 0xf9, + 0xcf, 0x02, 0xe1, 0x32, 0x9f, 0x17, 0x27, 0xe2, 0xb7, 0xb3, 0x9b, 0x90, 0x93, 0x94, 0x15, 0xde, + 0x41, 0xcb, 0x72, 0xcc, 0x3f, 0x8e, 0x02, 0x6c, 0xbd, 0x0e, 0xa8, 0x2f, 0x4b, 0x55, 0x9c, 0x54, + 0x74, 0x56, 0x94, 0x0b, 0x62, 0x63, 0x88, 0x9e, 0x0c, 0xf5, 0xc2, 0xff, 0x43, 0x8b, 0xd1, 0x86, + 0xb0, 0x5c, 0xdf, 0x71, 0xfd, 0x86, 0xdc, 0x0f, 0x8a, 0x19, 0x67, 0xac, 0x1b, 0x92, 0x19, 0x9f, + 0x67, 0x95, 0xa7, 0xc3, 0x84, 0x64, 0x10, 0x04, 0xbf, 0x42, 0x8b, 0x2a, 0x22, 0x38, 0x9a, 0x08, + 0x5c, 0xe0, 0xc5, 0xe9, 0x41, 0x7a, 0x97, 0xa5, 0x93, 0x83, 0xd4, 0xa3, 0x8b, 0x1a, 0x34, 0xc1, + 0x16, 0x2c, 0x3c, 0x80, 0xd0, 0xb3, 0xfe, 0xa6, 0xfb, 0xb5, 0xb8, 0x91, 0x85, 0x22, 0x83, 0xe8, + 0x2b, 0x0f, 0xd1, 0x7c, 0xa6, 0xe1, 0x78, 0x01, 0xe5, 0x8e, 0xa1, 0x1d, 0x11, 0x1d, 0x91, 0x3f, + 0xf1, 0x32, 0x9a, 0x38, 0xa1, 0xcd, 0x16, 0x44, 0x13, 0x48, 0xa2, 0xc3, 0x83, 0xf1, 0xfb, 0x46, + 0xe5, 0x07, 0x03, 0x2d, 0x24, 0xa7, 0xe7, 0x12, 0x68, 0x63, 0x37, 0x4d, 0x1b, 0x6b, 0xe7, 0x1d, + 0xec, 0x11, 0xdc, 0xf1, 0xa9, 0x81, 0x66, 0x93, 0x8b, 0x10, 0xdf, 0x44, 0xd3, 0xb4, 0xe5, 0xb8, + 0xe0, 0xdb, 0x3d, 0xb2, 0xef, 0x67, 0xb3, 0xa1, 0xe5, 0xa4, 0x6f, 0x21, 0xd7, 0x24, 0xbc, 0x0e, + 0xdc, 0x90, 0xca, 0x49, 0xab, 0x81, 0xcd, 0x7c, 0x87, 0xab, 0x32, 0xe5, 0xa2, 0x35, 0xb9, 0x95, + 0x55, 0x92, 0x41, 0xfb, 0xca, 
0xd7, 0xe3, 0x68, 0x21, 0x1a, 0x90, 0xe8, 0x2b, 0xc9, 0x03, 0x5f, + 0x5c, 0x02, 0xbd, 0x90, 0xd4, 0xbe, 0xbc, 0x7d, 0xf6, 0x2e, 0x89, 0xb3, 0x1b, 0xb5, 0x38, 0xf1, + 0x0b, 0x34, 0xc9, 0x05, 0x15, 0x2d, 0xc9, 0x3b, 0x12, 0x75, 0xfd, 0x42, 0xa8, 0xca, 0x33, 0x5e, + 0x9c, 0xd1, 0x99, 0x68, 0xc4, 0xca, 0x8f, 0x06, 0x5a, 0xce, 0xba, 0x5c, 0xc2, 0xc0, 0xfd, 0x37, + 0x3d, 0x70, 0x37, 0x2f, 0x72, 0xa3, 0x11, 0x43, 0xf7, 0xb3, 0x81, 0xfe, 0x32, 0x70, 0x79, 0xb5, + 0xa2, 0x25, 0x57, 0x05, 0x19, 0x46, 0xdc, 0x8b, 0xbf, 0x3b, 0x14, 0x57, 0xed, 0x0f, 0xd1, 0x93, + 0xa1, 0x5e, 0xf8, 0x25, 0x5a, 0x70, 0xfd, 0xa6, 0xeb, 0x43, 0x24, 0xab, 0xc5, 0xed, 0x1e, 0x4a, + 0x28, 0x59, 0x64, 0xd5, 0xe6, 0xe5, 0x6e, 0xa7, 0xbc, 0xb0, 0x9d, 0x41, 0x21, 0x03, 0xb8, 0x95, + 0x9f, 0x86, 0xb4, 0x47, 0xed, 0x63, 0xf9, 0xa2, 0x94, 0x04, 0xc2, 0x81, 0x17, 0xa5, 0xe5, 0xa4, + 0x6f, 0xa1, 0x26, 0x48, 0x95, 0x42, 0x27, 0x7a, 0xb1, 0x09, 0x52, 0x9e, 0x89, 0x09, 0x52, 0x67, + 0xa2, 0x11, 0x65, 0x26, 0xf2, 0x23, 0x4c, 0x15, 0x34, 0x97, 0xce, 0x64, 0x4f, 0xcb, 0x49, 0xdf, + 0xa2, 0xf2, 0x7b, 0x6e, 0x48, 0x97, 0xd4, 0x28, 0x26, 0xae, 0xd4, 0xfb, 0x93, 0x93, 0xbd, 0x92, + 0xd3, 0xbf, 0x92, 0x83, 0xbf, 0x34, 0x10, 0xa6, 0x7d, 0x88, 0xdd, 0xde, 0xa8, 0x46, 0xf3, 0xf4, + 0xe4, 0xe2, 0x2f, 0xc4, 0xdc, 0x18, 0x00, 0x8b, 0x76, 0xf5, 0x8a, 0x4e, 0x02, 0x0f, 0x1a, 0x90, + 0x21, 0x19, 0x60, 0x17, 0x15, 0x22, 0xe9, 0x56, 0x18, 0xb2, 0x50, 0x3f, 0xd9, 0x6b, 0x67, 0x27, + 0xa4, 0xcc, 0xad, 0x92, 0xfa, 0x98, 0x8c, 0xfd, 0x4f, 0x3b, 0xe5, 0x42, 0x42, 0x4f, 0x92, 0xd8, + 0x32, 0x94, 0x03, 0x71, 0xa8, 0xfc, 0x9f, 0x08, 0xf5, 0x08, 0x46, 0x87, 0x4a, 0x60, 0xaf, 0x6c, + 0xa1, 0xbf, 0x8e, 0x28, 0xd0, 0x85, 0x76, 0xdb, 0x67, 0x06, 0x4a, 0xc6, 0xc0, 0x3b, 0x28, 0x2f, + 0x5c, 0xfd, 0x12, 0x0b, 0xeb, 0x37, 0xce, 0xc7, 0x30, 0x07, 0xae, 0x07, 0x31, 0x51, 0xca, 0x13, + 0x51, 0x28, 0xf8, 0x3a, 0x9a, 0xf2, 0x80, 0x73, 0xda, 0xd0, 0x91, 0xe3, 0x2f, 0xcf, 0xdd, 0x48, + 0x4c, 0x7a, 0xfa, 0xca, 0x3d, 0xb4, 0x34, 0xe4, 0x5b, 0x1e, 0x97, 0xd1, 0x84, 0xad, 0xfe, 0x29, + 0xcb, 0x84, 0x26, 0xac, 0x19, 0xc9, 0x32, 0x9b, 0xea, 0x0f, 0x72, 0x24, 0xb7, 0x6e, 0xbd, 0x79, + 0x5f, 0x1a, 0x7b, 0xfb, 0xbe, 0x34, 0xf6, 0xee, 0x7d, 0x69, 0xec, 0x93, 0x6e, 0xc9, 0x78, 0xd3, + 0x2d, 0x19, 0x6f, 0xbb, 0x25, 0xe3, 0x5d, 0xb7, 0x64, 0xfc, 0xd2, 0x2d, 0x19, 0x9f, 0xff, 0x5a, + 0x1a, 0x7b, 0x31, 0xa5, 0xeb, 0xfd, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb1, 0x9d, 0x65, + 0x9d, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +757,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1167,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1596,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1737,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1903,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2023,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2399,6 +2533,61 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = append(m.TokenRequests, TokenRequest{}) + if err := 
m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3452,6 +3641,111 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } return nil } +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index e61876ed5..1eb4e1f88 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1beta1; @@ -147,6 +147,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. 
// +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. @@ -283,6 +320,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 7946663a3..10df2baa7 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -364,6 +364,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. 
+ // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -395,6 +432,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 60cc4c6a4..c51950d7c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. 
In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index a1538c131..89a102901 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 934790dcb..343a6f550 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -51,6 +51,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message + existingCondition.ObservedGeneration = newCondition.ObservedGeneration } // RemoveStatusCondition removes the corresponding conditionType from conditions. 
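For context on the apimachinery hunk above: SetStatusCondition now also copies ObservedGeneration onto an already-present condition instead of leaving the stale value in place. A minimal, self-contained sketch of the effect, assuming only the vendored k8s.io/apimachinery packages from this bump (the "Available" condition type, reason, and generation numbers are illustrative, not taken from this patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Conditions slice as it would live in some object's status.
	var conditions []metav1.Condition

	// First reconcile observes generation 1; the condition is appended.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "Available",
		Status:             metav1.ConditionTrue,
		Reason:             "AsExpected",
		Message:            "all replicas ready",
		ObservedGeneration: 1,
	})

	// Second reconcile observes generation 2 with an otherwise identical
	// condition; the existing entry is updated in place, and with this
	// change ObservedGeneration is carried over as well.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "Available",
		Status:             metav1.ConditionTrue,
		Reason:             "AsExpected",
		Message:            "all replicas ready",
		ObservedGeneration: 2,
	})

	// Prints 2 with the patched vendor code; before this hunk it stayed at 1.
	fmt.Println(conditions[0].ObservedGeneration)
}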
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 9ca34c9fa..6a4116a04 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -40,8 +40,6 @@ func CommonAccessor(obj interface{}) (metav1.Common, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil @@ -72,8 +70,6 @@ func ListAccessor(obj interface{}) (List, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 41b60d731..00bd86f51 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -65,6 +65,9 @@ type DefaultRESTMapper struct { } func (m *DefaultRESTMapper) String() string { + if m == nil { + return "" + } return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 18a6c7cd6..472104d54 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.api.resource; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index d95e03aa9..8d718945d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "math" "math/big" "strconv" "strings" @@ -120,7 +121,7 @@ const ( ) // MustParse turns the given string into a quantity or panics; for tests -// or others cases where you know the string is valid. +// or other cases where you know the string is valid. func MustParse(str string) Quantity { q, err := ParseQuantity(str) if err != nil { @@ -442,6 +443,36 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { } } +// AsApproximateFloat64 returns a float64 representation of the quantity which may +// lose precision. If the value of the quantity is outside the range of a float64 +// +Inf/-Inf will be returned. +func (q *Quantity) AsApproximateFloat64() float64 { + var base float64 + var exponent int + if q.d.Dec != nil { + base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64() + exponent = int(-q.d.Dec.Scale()) + } else { + base = float64(q.i.value) + exponent = int(q.i.scale) + } + if exponent == 0 { + return base + } + + // multiply by the appropriate exponential scale + switch q.Format { + case DecimalExponent, DecimalSI: + return base * math.Pow10(exponent) + default: + // fast path for exponents that can fit in 64 bits + if exponent > 0 && exponent < 7 { + return base * float64(int64(1)<<(exponent*10)) + } + return base * math.Pow(2, float64(exponent*10)) + } +} + // AsInt64 returns a representation of the current value as an int64 if a fast conversion // is possible. 
If false is returned, callers must use the inf.Dec form of this quantity. func (q *Quantity) AsInt64() (int64, bool) { @@ -598,6 +629,9 @@ const int64QuantityExpectedBytes = 18 // String is an expensive operation and caching this result significantly reduces the cost of // normal parse / marshal operations on Quantity. func (q *Quantity) String() string { + if q == nil { + return "" + } if len(q.s) == 0 { result := make([]byte, 0, int64QuantityExpectedBytes) number, suffix := q.CanonicalizeBytes(result) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go new file mode 100644 index 000000000..9f20152e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation contains generic api type validation functions. +package validation // import "k8s.io/apimachinery/pkg/api/validation" diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go new file mode 100644 index 000000000..947c96f43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go @@ -0,0 +1,86 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// IsNegativeErrorMsg is a error message for value must be greater than or equal to 0. +const IsNegativeErrorMsg string = `must be greater than or equal to 0` + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string + +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Subdomain(name) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. 
+func NameIsDNSLabel(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Label(name) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. +func NameIsDNS1035Label(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1035Label(name) +} + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = NameIsDNSLabel + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = NameIsDNSSubdomain + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if is a dash. +func maskTrailingDash(name string) string { + if strings.HasSuffix(name, "-") { + return name[:len(name)-2] + "a" + } + return name +} + +// ValidateNonnegativeField validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg)) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go new file mode 100644 index 000000000..889ec69aa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -0,0 +1,268 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// FieldImmutableErrorMsg is a error message for field is immutable. +const FieldImmutableErrorMsg string = `field is immutable` + +const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = map[schema.GroupVersionKind]struct{}{ + {Group: "", Version: "v1", Kind: "Event"}: {}, +} + +// ValidateClusterName can be used to check whether the given cluster name is valid. +var ValidateClusterName = NameIsDNS1035Label + +// ValidateAnnotations validates that a set of annotations are correctly defined. 
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + var totalSize int64 + for k, v := range annotations { + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(totalAnnotationSizeLimitB) { + allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB)) + } + return allErrs +} + +func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +// ValidateOwnerReferences validates that a set of owner references are correctly defined. +func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + controllerName := "" + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) + if ref.Controller != nil && *ref.Controller { + if controllerName != "" { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, + fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name))) + } else { + controllerName = ref.Name + } + } + } + return allErrs +} + +// ValidateFinalizerName validates finalizer names. +func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + + return allErrs +} + +// ValidateNoNewFinalizers validates the new finalizers has no new finalizers compare to old finalizers. +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + +// ValidateImmutableField validates the new value and the old value are deeply equal. 
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !apiequality.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + metadata, err := meta.Accessor(objMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath) +} + +// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(meta.GetGenerateName()) != 0 { + for _, msg := range nameFn(meta.GetGenerateName(), true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg)) + } + } + // If the generated name validates, but the calculated value does not, it's a problem with generation, and we + // report it here. This may confuse users, but indicates a programming bug and still must be validated. + // If there are multiple fields out of which one is required then add an or as a separator + if len(meta.GetName()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) + } else { + for _, msg := range nameFn(meta.GetName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg)) + } + } + if requiresNamespace { + if len(meta.GetNamespace()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } else { + for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg)) + } + } + } else { + if len(meta.GetNamespace()) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) + } + } + if len(meta.GetClusterName()) != 0 { + for _, msg := range ValidateClusterName(meta.GetClusterName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg)) + } + } + for _, entry := range meta.GetManagedFields() { + allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) + } + allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...) + allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) 
+ allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...) + return allErrs +} + +// ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers. +func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + hasFinalizerOrphanDependents := false + hasFinalizerDeleteDependents := false + for _, finalizer := range finalizers { + allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...) + if finalizer == metav1.FinalizerOrphanDependents { + hasFinalizerOrphanDependents = true + } + if finalizer == metav1.FinalizerDeleteDependents { + hasFinalizerDeleteDependents = true + } + } + if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents { + allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents))) + } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated. +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + newMetadata, err := meta.Accessor(newMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error())) + return allErrs + } + oldMetadata, err := meta.Accessor(oldMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath) +} + +// ValidateObjectMetaAccessorUpdate validates an object's metadata when updated. +func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // Finalizers cannot be added if the object is already being deleted. + if oldMeta.GetDeletionTimestamp() != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...) + } + + // Reject updates that don't specify a resource version + if len(newMeta.GetResourceVersion()) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update")) + } + + // Generation shouldn't be decremented + if newMeta.GetGeneration() < oldMeta.GetGeneration() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented")) + } + + for _, entry := range newMeta.GetManagedFields() { + allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) + } + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...) 
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child("clusterName"))...) + + allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go new file mode 100644 index 000000000..8403d1a86 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateListOptions returns all validation errors found while validating the ListOptions. 
+func ValidateListOptions(options *internalversion.ListOptions) field.ErrorList { + allErrs := field.ErrorList{} + if match := options.ResourceVersionMatch; len(match) > 0 { + if options.Watch { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch")) + } + if len(options.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""})) + } + if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 15b4c875a..40018601c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -26,6 +26,5 @@ reviewers: - mml - mbohlool - therc -- mqliang - kevin-wangzefeng - jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index b72d43ff0..fd24483c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1; @@ -375,6 +375,7 @@ message GroupVersionResource { // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic message LabelSelector { // matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index bd4c6d9b5..54a0944af 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -34,6 +34,9 @@ type GroupResource struct { } func (gr *GroupResource) String() string { + if gr == nil { + return "" + } if len(gr.Group) == 0 { return gr.Resource } @@ -51,6 +54,9 @@ type GroupVersionResource struct { } func (gvr *GroupVersionResource) String() string { + if gvr == nil { + return "" + } return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -64,6 +70,9 @@ type GroupKind struct { } func (gk *GroupKind) String() string { + if gk == nil { + return "" + } if len(gk.Group) == 0 { return gk.Kind } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index ad989ad75..3c5a1518c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -201,6 +201,20 @@ func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { obj.Annotations[ann] = value } +// HasLabel returns a bool if passed in label exists +func HasLabel(obj ObjectMeta, label string) bool { + _, found := obj.Labels[label] + return found +} + +// SetMetaDataLabel sets the label and value +func SetMetaDataLabel(obj *ObjectMeta, label string, value string) { + if obj.Labels == nil { + obj.Labels = make(map[string]string) + } + obj.Labels[label] = value +} + // SingleObject returns a ListOptions for watching a single object. func SingleObject(meta ObjectMeta) ListOptions { return ListOptions{ diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index cdd9a6a7a..8eb37f436 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - "github.com/google/gofuzz" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -181,16 +179,3 @@ func (t MicroTime) MarshalQueryParameter() (string, error) { return t.UTC().Format(RFC3339Micro), nil } - -// Fuzz satisfies fuzz.Interface. -func (t *MicroTime) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Accurate to a tenth of - // micro second. Leave off nanoseconds because JSON doesn't - // represent them so they can't round-trip properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) -} - -var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go new file mode 100644 index 000000000..befab16f7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -0,0 +1,39 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +// ensure MicroTime implements fuzz.Interface +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 4a1d89cfc..421770d43 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -182,16 +180,3 @@ func (t Time) MarshalQueryParameter() (string, error) { return t.UTC().Format(time.RFC3339), nil } - -// Fuzz satisfies fuzz.Interface. -func (t *Time) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) -} - -var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go new file mode 100644 index 000000000..94ad8d7cf --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -0,0 +1,39 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. 
+ t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +// ensure Time implements fuzz.Interface +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index bb57f2cc4..d84878d7c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1092,6 +1092,7 @@ type Patch struct{} // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 54a231e49..7b101ea51 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -282,14 +282,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return 0 - } - return val -} - func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { val, found, err := NestedInt64(obj, fields...) if !found || err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 59ce74376..a209dd456 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1beta1; diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go new file mode 100644 index 000000000..563b62efa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation/validation.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateTableOptions returns any invalid flags on TableOptions. 
+func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 838d5b0aa..791348476 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -26,16 +26,6 @@ type typePair struct { dest reflect.Type } -type typeNamePair struct { - fieldType reflect.Type - fieldName string -} - -// DebugLogger allows you to get debugging messages if necessary. -type DebugLogger interface { - Logf(format string, args ...interface{}) -} - type NameFunc func(t reflect.Type) string var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } @@ -57,24 +47,6 @@ type Converter struct { ignoredConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{} - // This is a map from a source field type and name, to a list of destination - // field type and name. - structFieldDests map[typeNamePair][]typeNamePair - - // Allows for the opposite lookup of structFieldDests. So that SourceFromDest - // copy flag also works. So this is a map of destination field name, to potential - // source field name and type to look for. - structFieldSources map[typeNamePair][]typeNamePair - - // Map from an input type to a function which can apply a key name mapping - inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc - - // Map from an input type to a set of default conversion flags. - inputDefaultFlags map[reflect.Type]FieldMatchingFlags - - // If non-nil, will be called to print helpful debugging info. Quite verbose. - Debug DebugLogger - // nameFunc is called to retrieve the name of a type; this name is used for the // purpose of deciding whether two types match or not (i.e., will we attempt to // do a conversion). The default returns the go type name. @@ -89,11 +61,6 @@ func NewConverter(nameFn NameFunc) *Converter { ignoredConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}), nameFunc: nameFn, - structFieldDests: make(map[typeNamePair][]typeNamePair), - structFieldSources: make(map[typeNamePair][]typeNamePair), - - inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), - inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } c.RegisterUntypedConversionFunc( (*[]byte)(nil), (*[]byte)(nil), @@ -112,11 +79,9 @@ func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { return &copied } -// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. -func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { - return c.inputDefaultFlags[t], &Meta{ - KeyNameMapping: c.inputFieldMappingFuncs[t], - } +// DefaultMeta returns meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) *Meta { + return &Meta{} } // Convert_Slice_byte_To_Slice_byte prevents recursing into every byte @@ -136,24 +101,12 @@ func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { type Scope interface { // Call Convert to convert sub-objects. Note that if you call it with your own exact // parameters, you'll run out of stack space before anything useful happens. 
- Convert(src, dest interface{}, flags FieldMatchingFlags) error - - // SrcTags and DestTags contain the struct tags that src and dest had, respectively. - // If the enclosing object was not a struct, then these will contain no tags, of course. - SrcTag() reflect.StructTag - DestTag() reflect.StructTag - - // Flags returns the flags with which the conversion was started. - Flags() FieldMatchingFlags + Convert(src, dest interface{}) error // Meta returns any information originally passed to Convert. Meta() *Meta } -// FieldMappingFunc can convert an input field value into different values, depending on -// the value of the source or destination struct tags. -type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) - func NewConversionFuncs() ConversionFuncs { return ConversionFuncs{ untyped: make(map[typePair]ConversionFunc), @@ -194,9 +147,6 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { // Meta is supplied by Scheme, when it calls Convert. type Meta struct { - // KeyNameMapping is an optional function which may map the listed key (field name) - // into a source and destination value. - KeyNameMapping FieldMappingFunc // Context is an optional field that callers may use to pass info to conversion functions. Context interface{} } @@ -205,84 +155,11 @@ type Meta struct { type scope struct { converter *Converter meta *Meta - flags FieldMatchingFlags - - // srcStack & destStack are separate because they may not have a 1:1 - // relationship. - srcStack scopeStack - destStack scopeStack -} - -type scopeStackElem struct { - tag reflect.StructTag - value reflect.Value - key string -} - -type scopeStack []scopeStackElem - -func (s *scopeStack) pop() { - n := len(*s) - *s = (*s)[:n-1] -} - -func (s *scopeStack) push(e scopeStackElem) { - *s = append(*s, e) -} - -func (s *scopeStack) top() *scopeStackElem { - return &(*s)[len(*s)-1] -} - -func (s scopeStack) describe() string { - desc := "" - if len(s) > 1 { - desc = "(" + s[1].value.Type().String() + ")" - } - for i, v := range s { - if i < 2 { - // First layer on stack is not real; second is handled specially above. - continue - } - if v.key == "" { - desc += fmt.Sprintf(".%v", v.value.Type()) - } else { - desc += fmt.Sprintf(".%v", v.key) - } - } - return desc -} - -// Formats src & dest as indices for printing. -func (s *scope) setIndices(src, dest int) { - s.srcStack.top().key = fmt.Sprintf("[%v]", src) - s.destStack.top().key = fmt.Sprintf("[%v]", dest) -} - -// Formats src & dest as map keys for printing. -func (s *scope) setKeys(src, dest interface{}) { - s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) - s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) } // Convert continues a conversion. -func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { - return s.converter.Convert(src, dest, flags, s.meta) -} - -// SrcTag returns the tag of the struct containing the current source item, if any. -func (s *scope) SrcTag() reflect.StructTag { - return s.srcStack.top().tag -} - -// DestTag returns the tag of the struct containing the current dest item, if any. -func (s *scope) DestTag() reflect.StructTag { - return s.destStack.top().tag -} - -// Flags returns the flags with which the current conversion was started. 
-func (s *scope) Flags() FieldMatchingFlags { - return s.flags +func (s *scope) Convert(src, dest interface{}) error { + return s.converter.Convert(src, dest, s.meta) } // Meta returns the meta object that was originally passed to Convert. @@ -290,50 +167,6 @@ func (s *scope) Meta() *Meta { return s.meta } -// describe prints the path to get to the current (source, dest) values. -func (s *scope) describe() (src, dest string) { - return s.srcStack.describe(), s.destStack.describe() -} - -// error makes an error that includes information about where we were in the objects -// we were asked to convert. -func (s *scope) errorf(message string, args ...interface{}) error { - srcPath, destPath := s.describe() - where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) - return fmt.Errorf(where+message, args...) -} - -// Verifies whether a conversion function has a correct signature. -func verifyConversionFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got: %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) - } - scopeType := Scope(nil) - if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. @@ -364,71 +197,16 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { return nil } -// RegisterInputDefaults registers a field name mapping function, used when converting -// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping -// applied automatically if the input matches in. A set of default flags for the input conversion -// may also be provided, which will be used when no explicit flags are requested. -func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { - fv := reflect.ValueOf(in) - ft := fv.Type() - if ft.Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) - } - c.inputFieldMappingFuncs[ft] = fn - c.inputDefaultFlags[ft] = defaultFlags - return nil -} - -// FieldMatchingFlags contains a list of ways in which struct fields could be -// copied. These constants may be | combined. -type FieldMatchingFlags int - -const ( - // Loop through destination fields, search for matching source - // field to copy it from. Source fields with no corresponding - // destination field will be ignored. If SourceToDest is - // specified, this flag is ignored. If neither is specified, - // or no flags are passed, this flag is the default. 
- DestFromSource FieldMatchingFlags = 0 - // Loop through source fields, search for matching dest field - // to copy it into. Destination fields with no corresponding - // source field will be ignored. - SourceToDest FieldMatchingFlags = 1 << iota - // Don't treat it as an error if the corresponding source or - // dest field can't be found. - IgnoreMissingFields - // Don't require type names to match. - AllowDifferentFieldTypeNames -) - -// IsSet returns true if the given flag or combination of flags is set. -func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { - if flag == DestFromSource { - // The bit logic doesn't work on the default value. - return f&SourceToDest != SourceToDest - } - return f&flag == flag -} - // Convert will translate src to dest if it knows how. Both must be pointers. // If no conversion func is registered and the default copying mechanism // doesn't work on this type pair, an error will be returned. -// Read the comments on the various FieldMatchingFlags constants to understand -// what the 'flags' parameter does. // 'meta' is given to allow you to pass information to conversion functions, // it is not used by Convert() other than storing it in the scope. // Not safe for objects with cyclic references! -func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { - return c.doConversion(src, dest, flags, meta, c.convert) -} - -type conversionFunc func(sv, dv reflect.Value, scope *scope) error - -func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { +func (c *Converter) Convert(src, dest interface{}, meta *Meta) error { pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} scope := &scope{ converter: c, - flags: flags, meta: meta, } @@ -452,366 +230,4 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags return err } return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type()) - - // TODO: Everything past this point is deprecated. - // Remove in 1.20 once we're sure it didn't break anything. - - // Leave something on the stack, so that calls to struct tag getters never fail. - scope.srcStack.push(scopeStackElem{}) - scope.destStack.push(scopeStackElem{}) - return f(sv, dv, scope) -} - -// callUntyped calls predefined conversion func. -func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { - if !dv.CanAddr() { - return scope.errorf("cant addr dest") - } - var svPointer reflect.Value - if sv.CanAddr() { - svPointer = sv.Addr() - } else { - svPointer = reflect.New(sv.Type()) - svPointer.Elem().Set(sv) - } - dvPointer := dv.Addr() - return f(svPointer.Interface(), dvPointer.Interface(), scope) -} - -// convert recursively copies sv into dv, calling an appropriate conversion function if -// one is registered. -func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { - dt, st := dv.Type(), sv.Type() - pair := typePair{st, dt} - - // ignore conversions of this type - if _, ok := c.ignoredConversions[pair]; ok { - if c.Debug != nil { - c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) - } - return nil - } - - // Convert sv to dv. 
- pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} - if f, ok := c.conversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - - if !dv.CanSet() { - return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)") - } - - if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { - return scope.errorf( - "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", - c.nameFunc(st), c.nameFunc(dt), st, dt) - } - - switch st.Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! - default: - // This should handle all simple types. - if st.AssignableTo(dt) { - dv.Set(sv) - return nil - } - if st.ConvertibleTo(dt) { - dv.Set(sv.Convert(dt)) - return nil - } - } - - if c.Debug != nil { - c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) - } - - scope.srcStack.push(scopeStackElem{value: sv}) - scope.destStack.push(scopeStackElem{value: dv}) - defer scope.srcStack.pop() - defer scope.destStack.pop() - - switch dv.Kind() { - case reflect.Struct: - return c.convertKV(toKVValue(sv), toKVValue(dv), scope) - case reflect.Slice: - if sv.IsNil() { - // Don't make a zero-length slice. - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) - for i := 0; i < sv.Len(); i++ { - scope.setIndices(i, i) - if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { - return err - } - } - case reflect.Ptr: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.New(dt.Elem())) - switch st.Kind() { - case reflect.Ptr, reflect.Interface: - return c.convert(sv.Elem(), dv.Elem(), scope) - default: - return c.convert(sv, dv.Elem(), scope) - } - case reflect.Map: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeMap(dt)) - for _, sk := range sv.MapKeys() { - dk := reflect.New(dt.Key()).Elem() - if err := c.convert(sk, dk, scope); err != nil { - return err - } - dkv := reflect.New(dt.Elem()).Elem() - scope.setKeys(sk.Interface(), dk.Interface()) - // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, - // because a map[string]struct{} does not allow a pointer reference. - // Calling a custom conversion function defined for the map value - // will panic. Example is PodInfo map[string]ContainerStatus. - if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { - return err - } - dv.SetMapIndex(dk, dkv) - } - case reflect.Interface: - if sv.IsNil() { - // Don't copy a nil interface! - dv.Set(reflect.Zero(dt)) - return nil - } - tmpdv := reflect.New(sv.Elem().Type()).Elem() - if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { - return err - } - dv.Set(reflect.ValueOf(tmpdv.Interface())) - return nil - default: - return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) - } - return nil -} - -var stringType = reflect.TypeOf("") - -func toKVValue(v reflect.Value) kvValue { - switch v.Kind() { - case reflect.Struct: - return structAdaptor(v) - case reflect.Map: - if v.Type().Key().AssignableTo(stringType) { - return stringMapAdaptor(v) - } - } - - return nil -} - -// kvValue lets us write the same conversion logic to work with both maps -// and structs. 
Only maps with string keys make sense for this. -type kvValue interface { - // returns all keys, as a []string. - keys() []string - // Will just return "" for maps. - tagOf(key string) reflect.StructTag - // Will return the zero Value if the key doesn't exist. - value(key string) reflect.Value - // Maps require explicit setting-- will do nothing for structs. - // Returns false on failure. - confirmSet(key string, v reflect.Value) bool -} - -type stringMapAdaptor reflect.Value - -func (a stringMapAdaptor) len() int { - return reflect.Value(a).Len() -} - -func (a stringMapAdaptor) keys() []string { - v := reflect.Value(a) - keys := make([]string, v.Len()) - for i, v := range v.MapKeys() { - if v.IsNil() { - continue - } - switch t := v.Interface().(type) { - case string: - keys[i] = t - } - } - return keys -} - -func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { - return "" -} - -func (a stringMapAdaptor) value(key string) reflect.Value { - return reflect.Value(a).MapIndex(reflect.ValueOf(key)) -} - -func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -type structAdaptor reflect.Value - -func (a structAdaptor) len() int { - v := reflect.Value(a) - return v.Type().NumField() -} - -func (a structAdaptor) keys() []string { - v := reflect.Value(a) - t := v.Type() - keys := make([]string, t.NumField()) - for i := range keys { - keys[i] = t.Field(i).Name - } - return keys -} - -func (a structAdaptor) tagOf(key string) reflect.StructTag { - v := reflect.Value(a) - field, ok := v.Type().FieldByName(key) - if ok { - return field.Tag - } - return "" -} - -func (a structAdaptor) value(key string) reflect.Value { - v := reflect.Value(a) - return v.FieldByName(key) -} - -func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -// convertKV can convert things that consist of key/value pairs, like structs -// and some maps. -func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { - if skv == nil || dkv == nil { - // TODO: add keys to stack to support really understandable error messages. - return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) - } - - lister := dkv - if scope.flags.IsSet(SourceToDest) { - lister = skv - } - - var mapping FieldMappingFunc - if scope.meta != nil && scope.meta.KeyNameMapping != nil { - mapping = scope.meta.KeyNameMapping - } - - for _, key := range lister.keys() { - if found, err := c.checkField(key, skv, dkv, scope); found { - if err != nil { - return err - } - continue - } - stag := skv.tagOf(key) - dtag := dkv.tagOf(key) - skey := key - dkey := key - if mapping != nil { - skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) - } - - df := dkv.value(dkey) - sf := skv.value(skey) - if !df.IsValid() || !sf.IsValid() { - switch { - case scope.flags.IsSet(IgnoreMissingFields): - // No error. - case scope.flags.IsSet(SourceToDest): - return scope.errorf("%v not present in dest", dkey) - default: - return scope.errorf("%v not present in src", skey) - } - continue - } - scope.srcStack.top().key = skey - scope.srcStack.top().tag = stag - scope.destStack.top().key = dkey - scope.destStack.top().tag = dtag - if err := c.convert(sf, df, scope); err != nil { - return err - } - } - return nil -} - -// checkField returns true if the field name matches any of the struct -// field copying rules. The error should be ignored if it returns false. 
-func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { - replacementMade := false - if scope.flags.IsSet(DestFromSource) { - df := dkv.value(fieldName) - if !df.IsValid() { - return false, nil - } - destKey := typeNamePair{df.Type(), fieldName} - // Check each of the potential source (type, name) pairs to see if they're - // present in sv. - for _, potentialSourceKey := range c.structFieldSources[destKey] { - sf := skv.value(potentialSourceKey.fieldName) - if !sf.IsValid() { - continue - } - if sf.Type() == potentialSourceKey.fieldType { - // Both the source's name and type matched, so copy. - scope.srcStack.top().key = potentialSourceKey.fieldName - scope.destStack.top().key = fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(fieldName, df) - replacementMade = true - } - } - return replacementMade, nil - } - - sf := skv.value(fieldName) - if !sf.IsValid() { - return false, nil - } - srcKey := typeNamePair{sf.Type(), fieldName} - // Check each of the potential dest (type, name) pairs to see if they're - // present in dv. - for _, potentialDestKey := range c.structFieldDests[srcKey] { - df := dkv.value(potentialDestKey.fieldName) - if !df.IsValid() { - continue - } - if df.Type() == potentialDestKey.fieldType { - // Both the dest's name and type matched, so copy. - scope.srcStack.top().key = fieldName - scope.destStack.top().key = potentialDestKey.fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(potentialDestKey.fieldName, df) - replacementMade = true - } - } - return replacementMade, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index d9eeb4f91..d6bbeeaca 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -141,25 +141,6 @@ func Equals(labels1, labels2 Set) bool { return true } -// AreLabelsInWhiteList verifies if the provided label list -// is in the provided whitelist and returns true, otherwise false. -func AreLabelsInWhiteList(labels, whitelist Set) bool { - if len(whitelist) == 0 { - return true - } - - for k, v := range labels { - value, ok := whitelist[k] - if !ok { - return false - } - if value != v { - return false - } - } - return true -} - // ConvertSelectorToLabelsMap converts selector string to labels map // and validates keys and values func ConvertSelectorToLabelsMap(selector string) (Set, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index bf62f98a4..50ae4f7ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -263,11 +263,11 @@ func (r *Requirement) Values() sets.String { } // Empty returns true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { +func (s internalSelector) Empty() bool { + if s == nil { return true } - return len(lsel) == 0 + return len(s) == 0 } // String returns a human-readable string that represents this @@ -330,51 +330,51 @@ func safeSort(in []string) []string { } // Add adds requirements to the selector. 
It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) +func (s internalSelector) Add(reqs ...Requirement) Selector { + var ret internalSelector + for ix := range s { + ret = append(ret, s[ix]) } for _, r := range reqs { - sel = append(sel, r) + ret = append(ret, r) } - sort.Sort(ByKey(sel)) - return sel + sort.Sort(ByKey(ret)) + return ret } // Matches for a internalSelector returns true if all // its Requirements match the input Labels. If any // Requirement does not match, false is returned. -func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { +func (s internalSelector) Matches(l Labels) bool { + for ix := range s { + if matches := s[ix].Matches(l); !matches { return false } } return true } -func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } +func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true } // String returns a comma-separated string of all // the internalSelector Requirements' human-readable strings. -func (lsel internalSelector) String() string { +func (s internalSelector) String() string { var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) + for ix := range s { + reqs = append(reqs, s[ix].String()) } return strings.Join(reqs, ",") } // RequiresExactMatch introspect whether a given selector requires a single specific field // to be set, and if so returns the value it requires. -func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { - for ix := range lsel { - if lsel[ix].key == label { - switch lsel[ix].operator { +func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range s { + if s[ix].key == label { + switch s[ix].operator { case selection.Equals, selection.DoubleEquals, selection.In: - if len(lsel[ix].strValues) == 1 { - return lsel[ix].strValues[0], true + if len(s[ix].strValues) == 1 { + return s[ix].strValues[0], true } } return "", false @@ -789,12 +789,12 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) { // parseExactValue parses the only value for exact match style func (p *Parser) parseExactValue() (sets.String, error) { s := sets.NewString() - tok, lit := p.lookahead(Values) + tok, _ := p.lookahead(Values) if tok == EndOfStringToken || tok == CommaToken { s.Insert("") return s, nil } - tok, lit = p.consume(Values) + tok, lit := p.consume(Values) if tok == IdentifierToken { s.Insert(lit) return s, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 871e4c8c4..4a6cc6857 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -186,6 +186,9 @@ func fromUnstructured(sv, dv reflect.Value) error { reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: dv.Set(sv.Convert(dt)) return nil + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil } case reflect.Float32, reflect.Float64: switch dt.Kind() { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 0e212ec94..3b25391fa 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ 
b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index f44693c0c..3e1fab1d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -57,7 +57,7 @@ type Encoder interface { // Identifiers of two different encoders should be equal if and only if for every input // object it will be encoded to the same representation by both of them. // - // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to + // Identifier is intended for use with CacheableObject#CacheEncode method. In order to // correctly handle CacheableObject, Encode() method should look similar to below, where // doEncode() is the encoding logic of implemented encoder: // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go index 159b30120..3ab119b0a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -92,39 +92,6 @@ func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion } } -// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver -// where objects are converted to gv prior to sending and decoded to their internal representation prior -// to retrieval. -// -// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. -func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { - decode := schema.GroupVersions{ - { - Group: gv.Group, - Version: APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: APIVersionInternal, - }, - } - return &clientNegotiator{ - encode: gv, - decode: decode, - serializer: serializer, - } -} - -// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used -// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. -func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { - return &clientNegotiator{ - serializer: &simpleNegotiatedSerializer{info: info}, - encode: gv, - } -} - type simpleNegotiatedSerializer struct { info SerializerInfo } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index 5aeeaa100..c50766a4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime.schema; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go index b57066845..f04453fb0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -23,8 +23,8 @@ type ObjectKind interface { // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil // should clear the current setting. SetGroupVersionKind(kind GroupVersionKind) - // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does - // not expose or provide these fields. + // GroupVersionKind returns the stored group, version, and kind of an object, or an empty struct + // if the object does not expose or provide these fields. GroupVersionKind() GroupVersionKind } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 3b254961d..697dd4ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -18,7 +18,6 @@ package runtime import ( "fmt" - "net/url" "reflect" "strings" @@ -105,9 +104,6 @@ func NewScheme() *Scheme { // Enable couple default conversions by default. utilruntime.Must(RegisterEmbeddedConversions(s)) utilruntime.Must(RegisterStringConversions(s)) - - utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) - utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) return s } @@ -309,11 +305,6 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { return nil, NewNotRegisteredErrForKind(s.schemeName, kind) } -// Log sets a logger on the scheme. For test purposes only -func (s *Scheme) Log(l conversion.DebugLogger) { - s.converter.Debug = l -} - // AddIgnoredConversionType identifies a pair of types that should be skipped by // conversion (because the data inside them is explicitly dropped during // conversion). @@ -342,14 +333,6 @@ func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conver return nil } -// RegisterInputDefaults sets the provided field mapping function and field matching -// as the defaults for the provided input type. The fn may be nil, in which case no -// mapping will happen by default. Use this method to register a mechanism for handling -// a specific input type in conversion, such as a map[string]string to structs. -func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { - return s.converter.RegisterInputDefaults(in, fn, defaultFlags) -} - // AddTypeDefaultingFunc registers a function that is passed a pointer to an // object and can default fields on the object. These functions will be invoked // when Default() is called. 
The function will never be called unless the @@ -433,12 +416,9 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { in = typed } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = context - if flags == 0 { - flags = conversion.AllowDifferentFieldTypeNames - } - return s.converter.Convert(in, out, flags, meta) + return s.converter.Convert(in, out, meta) } // ConvertFieldLabel alters the given field label and value for an kind field selector from @@ -535,9 +515,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( in = in.DeepCopyObject() } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = target - if err := s.converter.Convert(in, out, flags, meta); err != nil { + if err := s.converter.Convert(in, out, meta); err != nil { return nil, err } @@ -565,7 +545,7 @@ func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { } // generateConvertMeta constructs the meta value we pass to Convert. -func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { +func (s *Scheme) generateConvertMeta(in interface{}) *conversion.Meta { return s.converter.DefaultMeta(reflect.TypeOf(in)) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index f21b0ef19..e55ab94d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -108,10 +108,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option // CodecFactory provides methods for retrieving codecs and serializers for specific // versions and content types. type CodecFactory struct { - scheme *runtime.Scheme - serializers []serializerType - universal runtime.Decoder - accepts []runtime.SerializerInfo + scheme *runtime.Scheme + universal runtime.Decoder + accepts []runtime.SerializerInfo legacySerializer runtime.Serializer } @@ -216,9 +215,8 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } return CodecFactory{ - scheme: scheme, - serializers: serializers, - universal: recognizer.NewDecoder(decoders...), + scheme: scheme, + universal: recognizer.NewDecoder(decoders...), accepts: accepts, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index e081d7ff1..83b2e1393 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -96,6 +96,7 @@ type SerializerOptions struct { Strict bool } +// Serializer handles encoding versioned objects into the proper JSON form type Serializer struct { meta MetaFactory options SerializerOptions @@ -144,10 +145,10 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } } -// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive when unmarshalling, and otherwise compatible with // the encoding/json standard library. 
-func CaseSensitiveJsonIterator() jsoniter.API { +func CaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -159,10 +160,10 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with // the encoding/json standard library. -func StrictCaseSensitiveJsonIterator() jsoniter.API { +func StrictCaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -179,8 +180,8 @@ func StrictCaseSensitiveJsonIterator() jsoniter.API { // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. // See https://github.com/json-iterator/go/issues/265 -var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() -var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() +var caseSensitiveJSONIterator = CaseSensitiveJSONIterator() +var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -236,7 +237,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil @@ -260,7 +261,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } @@ -285,7 +286,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, // the actual error is that the object contains unknown field. strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil { return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) } // Always return the same object as the non-strict serializer to avoid any deviations. 
@@ -302,7 +303,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { - json, err := caseSensitiveJsonIterator.Marshal(obj) + json, err := caseSensitiveJSONIterator.Marshal(obj) if err != nil { return err } @@ -315,7 +316,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } if s.options.Pretty { - data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") + data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ") if err != nil { return err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index f606b7d72..404fb1b7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -61,6 +61,7 @@ func (e errNotMarshalable) Status() metav1.Status { } } +// IsNotMarshalable checks the type of error, returns a boolean true if error is not nil and not marshalable false otherwise func IsNotMarshalable(err error) bool { _, ok := err.(errNotMarshalable) return err != nil && ok @@ -77,6 +78,7 @@ func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Se } } +// Serializer handles encoding versioned objects into the proper wire form type Serializer struct { prefix []byte creater runtime.ObjectCreater @@ -457,8 +459,10 @@ func (s *RawSerializer) Identifier() runtime.Identifier { return rawSerializerIdentifier } +// LengthDelimitedFramer is exported variable of type lengthDelimitedFramer var LengthDelimitedFramer = lengthDelimitedFramer{} +// Provides length delimited frame reader and writer methods type lengthDelimitedFramer struct{} // NewFrameWriter implements stream framing for this serializer diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 88f0de36d..b19750f3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -16,10 +16,6 @@ limitations under the License. package types -import ( - "fmt" -) - // NamespacedName comprises a resource name, with a mandatory namespace, // rendered as "/". Being a type captures intent and // helps make sure that UIDs, namespaced names and non-namespaced names @@ -39,5 +35,5 @@ const ( // String returns the general purpose string representation func (n NamespacedName) String() string { - return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) + return n.Namespace + string(Separator) + n.Name } diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 84b4f5884..faf2c2645 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -145,7 +145,7 @@ func (c *Expiring) gc(now time.Time) { // expired. // // heap[0] is a peek at the next element in the heap, which is not obvious - // from looking at the (*expiringHeap).Pop() implmentation below. + // from looking at the (*expiringHeap).Pop() implementation below. // heap.Pop() swaps the first entry with the last entry of the heap, then // calls (*expiringHeap).Pop() which returns the last element. 
if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 6cf13d83d..3e1e2517b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -348,7 +348,13 @@ func (f *fakeTimer) Stop() bool { // Reset conditionally updates the firing time of the timer. If the // timer has neither fired nor been stopped then this call resets the // timer to the fake clock's "now" + d and returns true, otherwise -// this call returns false. This is like time.Timer::Reset. +// it creates a new waiter, adds it to the clock, and returns true. +// +// It is not possible to return false, because a fake timer can be reset +// from any state (waiting to fire, already fired, and stopped). +// +// See the GoDoc for time.Timer::Reset for more context on why +// the return value of Reset() is not useful. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() @@ -360,7 +366,15 @@ func (f *fakeTimer) Reset(d time.Duration) bool { return true } } - return false + // No existing waiter, timer has already fired or been reset. + // We should still enable Reset() to succeed by creating a + // new waiter and adding it to the clock's waiters. + newWaiter := fakeClockWaiter{ + targetTime: f.fakeClock.time.Add(d), + destChan: seekChan, + } + f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter) + return true } // Ticker defines the Ticker interface diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 5bafc218e..1f5a04fd4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -163,7 +163,7 @@ func matchesError(err error, fns ...Matcher) bool { // filterErrors returns any errors (or nested errors, if the list contains // nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all +// remain a nil list is returned. The resulting slice will have all // nested slices flattened as a side effect. func filterErrors(list []error, fns ...Matcher) []error { result := []error{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 066680f44..45aa74bf5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -132,12 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // Return whatever remaining data exists from an in progress frame if n := len(r.remaining); n > 0 { if n <= len(data) { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining...) r.remaining = nil return n, nil } n = len(data) + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining[:n]...) r.remaining = r.remaining[n:] return n, io.ErrShortBuffer @@ -155,6 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > n { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], m[:n]...) 
r.remaining = m[n:] return n, io.ErrShortBuffer diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index e79fb9e57..a76f79851 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.util.intstr; diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go new file mode 100644 index 000000000..2501d5516 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -0,0 +1,42 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package intstr + +import ( + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +// ensure IntOrString implements fuzz.Interface +var _ fuzz.Interface = &IntOrString{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 6576def82..c0e8927fe 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,7 +25,6 @@ import ( "strconv" "strings" - "github.com/google/gofuzz" "k8s.io/klog/v2" ) @@ -90,6 +89,9 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { + if intstr == nil { + return "" + } if intstr.Type == String { return intstr.StrVal } @@ -129,21 +131,6 @@ func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // the OpenAPI spec of this type. func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { - if intstr == nil { - return - } - if c.RandBool() { - intstr.Type = Int - c.Fuzz(&intstr.IntVal) - intstr.StrVal = "" - } else { - intstr.Type = String - intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) - } -} - func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { if intOrPercent == nil { return &defaultValue @@ -151,6 +138,33 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS return intOrPercent } +// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. +// This method returns a scaled value from an IntOrString type. 
If the IntOrString +// is a percentage string value it's treated as a percentage and scaled appropriately +// in accordance to the total, if it's an int value it's treated as a a simple value and +// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. +func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValueSafely(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// GetValueFromIntOrPercent was deprecated in favor of +// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all +// strings with or without a percent symbol as a percentage value. +// Deprecated func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { if intOrPercent == nil { return 0, errors.New("nil value for IntOrString") @@ -169,6 +183,8 @@ func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool return value, nil } +// getIntOrPercentValue is a legacy function and only meant to be called by GetValueFromIntOrPercent +// For a more correct implementation call getIntOrPercentSafely func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { switch intOrStr.Type { case Int: @@ -183,3 +199,25 @@ func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { } return 0, false, fmt.Errorf("invalid type: neither int nor percentage") } + +func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 204834883..778e58f70 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -39,7 +39,8 @@ func Marshal(v interface{}) ([]byte, error) { const maxDepth = 10000 // Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { switch v := v.(type) { case *map[string]interface{}: @@ -52,7 +53,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) + return ConvertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -64,7 +65,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If 
the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) + return ConvertSliceNumbers(*v, 0) case *interface{}: // Build a decoder from the given data @@ -76,29 +77,31 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertInterfaceNumbers(v, 0) + return ConvertInterfaceNumbers(v, 0) default: return json.Unmarshal(data, v) } } -func convertInterfaceNumbers(v *interface{}, depth int) error { +// ConvertInterfaceNumbers converts any json.Number values to int64 or float64. +// Values which are map[string]interface{} or []interface{} are recursively visited +func ConvertInterfaceNumbers(v *interface{}, depth int) error { var err error switch v2 := (*v).(type) { case json.Number: *v, err = convertNumber(v2) case map[string]interface{}: - err = convertMapNumbers(v2, depth+1) + err = ConvertMapNumbers(v2, depth+1) case []interface{}: - err = convertSliceNumbers(v2, depth+1) + err = ConvertSliceNumbers(v2, depth+1) } return err } -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// ConvertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { +func ConvertMapNumbers(m map[string]interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -109,9 +112,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err @@ -120,9 +123,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { return nil } -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// ConvertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { +func ConvertSliceNumbers(s []interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -133,9 +136,9 @@ func convertSliceNumbers(s []interface{}, depth int) error { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 945886c43..ba63d02df 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -33,6 +33,7 @@ import ( "regexp" "strconv" "strings" + "time" "unicode" "unicode/utf8" @@ -132,13 +133,61 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { klog.Infof("HTTP2 has been explicitly disabled") } else if allowsHTTP2(t) { - if err := http2.ConfigureTransport(t); err != nil { + if err := configureHTTP2Transport(t); err != nil { klog.Warningf("Transport failed http2 configuration: %v", err) } } return t } +func readIdleTimeoutSeconds() int { + ret := 30 + // User can set the readIdleTimeout to 0 to disable the HTTP/2 + // connection health check. + if s := os.Getenv("HTTP2_READ_IDLE_TIMEOUT_SECONDS"); len(s) > 0 { + i, err := strconv.Atoi(s) + if err != nil { + klog.Warningf("Illegal HTTP2_READ_IDLE_TIMEOUT_SECONDS(%q): %v."+ + " Default value %d is used", s, err, ret) + return ret + } + ret = i + } + return ret +} + +func pingTimeoutSeconds() int { + ret := 15 + if s := os.Getenv("HTTP2_PING_TIMEOUT_SECONDS"); len(s) > 0 { + i, err := strconv.Atoi(s) + if err != nil { + klog.Warningf("Illegal HTTP2_PING_TIMEOUT_SECONDS(%q): %v."+ + " Default value %d is used", s, err, ret) + return ret + } + ret = i + } + return ret +} + +func configureHTTP2Transport(t *http.Transport) error { + t2, err := http2.ConfigureTransports(t) + if err != nil { + return err + } + // The following enables the HTTP/2 connection health check added in + // https://github.com/golang/net/pull/55. The health check detects and + // closes broken transport layer connections. Without the health check, + // a broken connection can linger too long, e.g., a broken TCP + // connection will be closed by the Linux kernel after 13 to 30 minutes + // by default, which caused + // https://github.com/kubernetes/client-go/issues/374 and + // https://github.com/kubernetes/kubernetes/issues/87615. + t2.ReadIdleTimeout = time.Duration(readIdleTimeoutSeconds()) * time.Second + t2.PingTimeout = time.Duration(pingTimeoutSeconds()) * time.Second + return nil +} + func allowsHTTP2(t *http.Transport) bool { if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { // the transport expressed no NextProto preference, allow diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 7b6eca893..42ecffcca 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -130,7 +130,7 @@ func (*PortRange) Type() string { } // ParsePortRange parses a string of the form "min-max", inclusive at both -// ends, and initializs a new PortRange from it. 
+// ends, and initializes a new PortRange from it. func ParsePortRange(value string) (*PortRange, error) { pr := &PortRange{} err := pr.Set(value) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index e8a9f609f..035c52811 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -79,7 +79,7 @@ func logPanic(r interface{}) { } } -// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// ErrorHandlers is a list of functions which will be invoked when a nonreturnable // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. @@ -165,7 +165,7 @@ func RecoverFromPanic(err *error) { } } -// Must panics on non-nil errors. Useful to handling programmer level errors. +// Must panics on non-nil errors. Useful to handling programmer level errors. func Must(err error) { if err != nil { panic(err) diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go new file mode 100644 index 000000000..1fa351aab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -0,0 +1,27 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uuid + +import ( + "github.com/google/uuid" + + "k8s.io/apimachinery/pkg/types" +) + +func NewUUID() types.UID { + return types.UID(uuid.New().String()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go index 2efc8eec7..f9be7ac33 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -67,6 +67,9 @@ func (p *Path) Key(key string) *Path { // String produces a string representation of the Path. func (p *Path) String() string { + if p == nil { + return "" + } // make a slice to iterate elems := []*Path{} for ; p != nil; p = p.parent { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 4752b29a9..c8b419984 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -175,7 +175,7 @@ func IsValidLabelValue(value string) []string { } const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" 
-const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 @@ -196,7 +196,7 @@ func IsDNS1123Label(value string) []string { } const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 @@ -347,7 +347,7 @@ func IsValidPortName(port string) []string { // IsValidIP tests that the argument is a valid IP address. func IsValidIP(value string) []string { if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index d759d912b..3dea7fe7f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -604,3 +604,32 @@ func poller(interval, timeout time.Duration) WaitFunc { return ch }) } + +// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never +// exceeds the deadline specified by the request context. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go new file mode 100644 index 000000000..a6f29cd7c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package waitgroup implements SafeWaitGroup wrap of sync.WaitGroup. +// Add with positive delta when waiting will fail, to prevent sync.WaitGroup race issue. 
+package waitgroup // import "k8s.io/apimachinery/pkg/util/waitgroup" diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go new file mode 100644 index 000000000..e080a5e92 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package waitgroup + +import ( + "fmt" + "sync" +) + +// SafeWaitGroup must not be copied after first use. +type SafeWaitGroup struct { + wg sync.WaitGroup + mu sync.RWMutex + // wait indicate whether Wait is called, if true, + // then any Add with positive delta will return error. + wait bool +} + +// Add adds delta, which may be negative, similar to sync.WaitGroup. +// If Add with a positive delta happens after Wait, it will return error, +// which prevent unsafe Add. +func (wg *SafeWaitGroup) Add(delta int) error { + wg.mu.RLock() + defer wg.mu.RUnlock() + if wg.wait && delta > 0 { + return fmt.Errorf("add with positive delta after Wait is forbidden") + } + wg.wg.Add(delta) + return nil +} + +// Done decrements the WaitGroup counter. +func (wg *SafeWaitGroup) Done() { + wg.wg.Done() +} + +// Wait blocks until the WaitGroup counter is zero. +func (wg *SafeWaitGroup) Wait() { + wg.mu.Lock() + wg.wait = true + wg.mu.Unlock() + wg.wg.Wait() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 492171faf..7fe706467 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,10 +26,41 @@ import ( "strings" "unicode" + jsonutil "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" "sigs.k8s.io/yaml" ) +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + preserveIntFloat := func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + } + switch v := v.(type) { + case *map[string]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertMapNumbers(*v, 0) + case *[]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertSliceNumbers(*v, 0) + case *interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertInterfaceNumbers(v, 0) + default: + return yaml.Unmarshal(data, v) + } +} + // ToJSON converts a single YAML document into a JSON document // or returns an error. 
If the document appears to be JSON the // YAML decoding path is not used (so that error messages are diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index 0ac8dc4ef..0aaf01adc 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -40,15 +40,12 @@ const incomingQueueLength = 25 // Broadcaster distributes event notifications among any number of watchers. Every event // is delivered to every watcher. type Broadcaster struct { - // TODO: see if this lock is needed now that new watchers go through - // the incoming channel. - lock sync.Mutex - watchers map[int64]*broadcasterWatcher nextWatcher int64 distributing sync.WaitGroup incoming chan Event + stopped chan struct{} // How large to make watcher's channel. watchQueueLength int @@ -68,6 +65,7 @@ func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *B m := &Broadcaster{ watchers: map[int64]*broadcasterWatcher{}, incoming: make(chan Event, incomingQueueLength), + stopped: make(chan struct{}), watchQueueLength: queueLength, fullChannelBehavior: fullChannelBehavior, } @@ -96,10 +94,15 @@ func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { // The purpose of this terrible hack is so that watchers added after an event // won't ever see that event, and will always see any event after they are // added. -func (b *Broadcaster) blockQueue(f func()) { +func (m *Broadcaster) blockQueue(f func()) { + select { + case <-m.stopped: + return + default: + } var wg sync.WaitGroup wg.Add(1) - b.incoming <- Event{ + m.incoming <- Event{ Type: internalRunFunctionMarker, Object: functionFakeRuntimeObject(func() { defer wg.Done() @@ -111,12 +114,11 @@ func (b *Broadcaster) blockQueue(f func()) { // Watch adds a new watcher to the list and returns an Interface for it. // Note: new watchers will only receive new events. They won't get an entire history -// of previous events. +// of previous events. It will block until the watcher is actually added to the +// broadcaster. func (m *Broadcaster) Watch() Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ w = &broadcasterWatcher{ @@ -127,18 +129,22 @@ func (m *Broadcaster) Watch() Interface { } m.watchers[id] = w }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. + panic("broadcaster already stopped") + } return w } // WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends // queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. // The returned watch will have a queue length that is at least large enough to accommodate -// all of the items in queuedEvents. +// all of the items in queuedEvents. It will block until the watcher is actually added to +// the broadcaster. func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ length := m.watchQueueLength @@ -156,26 +162,29 @@ func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { w.result <- e } }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. 
+ panic("broadcaster already stopped") + } return w } // stopWatching stops the given watcher and removes it from the list. func (m *Broadcaster) stopWatching(id int64) { - m.lock.Lock() - defer m.lock.Unlock() - w, ok := m.watchers[id] - if !ok { - // No need to do anything, it's already been removed from the list. - return - } - delete(m.watchers, id) - close(w.result) + m.blockQueue(func() { + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) + }) } // closeAll disconnects all watchers (presumably in response to a Shutdown call). func (m *Broadcaster) closeAll() { - m.lock.Lock() - defer m.lock.Unlock() for _, w := range m.watchers { close(w.result) } @@ -194,9 +203,12 @@ func (m *Broadcaster) Action(action EventType, obj runtime.Object) { // until all events have been distributed through the outbound channels. Note // that since they can be buffered, this means that the watchers might not // have received the data yet as it can remain sitting in the buffered -// channel. +// channel. It will block until the broadcaster stop request is actually executed func (m *Broadcaster) Shutdown() { - close(m.incoming) + m.blockQueue(func() { + close(m.stopped) + close(m.incoming) + }) m.distributing.Wait() } @@ -217,8 +229,6 @@ func (m *Broadcaster) loop() { // distribute sends event to all watchers. Blocking. func (m *Broadcaster) distribute(event Event) { - m.lock.Lock() - defer m.lock.Unlock() if m.fullChannelBehavior == DropIfChannelFull { for _, w := range m.watchers { select { @@ -252,6 +262,7 @@ func (mw *broadcasterWatcher) ResultChan() <-chan Event { } // Stop stops watching and removes mw from its list. +// It will block until the watcher stop request is actually executed func (mw *broadcasterWatcher) Stop() { mw.stop.Do(func() { close(mw.stopped) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8271e9b70..99f6770b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -97,9 +97,9 @@ func (sw *StreamWatcher) stopping() bool { // receive reads result from the decoder in a loop and sends down the result channel. func (sw *StreamWatcher) receive() { + defer utilruntime.HandleCrash() defer close(sw.result) defer sw.Stop() - defer utilruntime.HandleCrash() for { action, obj, err := sw.source.Decode() if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index 1f4911a31..fd0550e4a 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -276,7 +276,7 @@ func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { } } -// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe. +// ProxyWatcher lets you wrap your channel in watch Interface. threadsafe. type ProxyWatcher struct { result chan Event stopCh chan struct{} diff --git a/vendor/k8s.io/apiserver/pkg/admission/attributes.go b/vendor/k8s.io/apiserver/pkg/admission/attributes.go new file mode 100644 index 000000000..1d291f6b2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/attributes.go @@ -0,0 +1,211 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" +) + +type attributesRecord struct { + kind schema.GroupVersionKind + namespace string + name string + resource schema.GroupVersionResource + subresource string + operation Operation + options runtime.Object + dryRun bool + object runtime.Object + oldObject runtime.Object + userInfo user.Info + + // other elements are always accessed in single goroutine. + // But ValidatingAdmissionWebhook add annotations concurrently. + annotations map[string]annotation + annotationsLock sync.RWMutex + + reinvocationContext ReinvocationContext +} + +type annotation struct { + level auditinternal.Level + value string +} + +func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind schema.GroupVersionKind, namespace, name string, resource schema.GroupVersionResource, subresource string, operation Operation, operationOptions runtime.Object, dryRun bool, userInfo user.Info) Attributes { + return &attributesRecord{ + kind: kind, + namespace: namespace, + name: name, + resource: resource, + subresource: subresource, + operation: operation, + options: operationOptions, + dryRun: dryRun, + object: object, + oldObject: oldObject, + userInfo: userInfo, + reinvocationContext: &reinvocationContext{}, + } +} + +func (record *attributesRecord) GetKind() schema.GroupVersionKind { + return record.kind +} + +func (record *attributesRecord) GetNamespace() string { + return record.namespace +} + +func (record *attributesRecord) GetName() string { + return record.name +} + +func (record *attributesRecord) GetResource() schema.GroupVersionResource { + return record.resource +} + +func (record *attributesRecord) GetSubresource() string { + return record.subresource +} + +func (record *attributesRecord) GetOperation() Operation { + return record.operation +} + +func (record *attributesRecord) GetOperationOptions() runtime.Object { + return record.options +} + +func (record *attributesRecord) IsDryRun() bool { + return record.dryRun +} + +func (record *attributesRecord) GetObject() runtime.Object { + return record.object +} + +func (record *attributesRecord) GetOldObject() runtime.Object { + return record.oldObject +} + +func (record *attributesRecord) GetUserInfo() user.Info { + return record.userInfo +} + +// getAnnotations implements privateAnnotationsGetter.It's a private method used +// by WithAudit decorator. 
+func (record *attributesRecord) getAnnotations(maxLevel auditinternal.Level) map[string]string { + record.annotationsLock.RLock() + defer record.annotationsLock.RUnlock() + + if record.annotations == nil { + return nil + } + cp := make(map[string]string, len(record.annotations)) + for key, value := range record.annotations { + if value.level.Less(maxLevel) || value.level == maxLevel { + cp[key] = value.value + } + } + return cp +} + +// AddAnnotation adds an annotation to attributesRecord with Metadata audit level +func (record *attributesRecord) AddAnnotation(key, value string) error { + return record.AddAnnotationWithLevel(key, value, auditinternal.LevelMetadata) +} + +func (record *attributesRecord) AddAnnotationWithLevel(key, value string, level auditinternal.Level) error { + if err := checkKeyFormat(key); err != nil { + return err + } + if level.Less(auditinternal.LevelMetadata) { + return fmt.Errorf("admission annotations are not allowed to be set at audit level lower than Metadata, key: %q, level: %s", key, level) + } + record.annotationsLock.Lock() + defer record.annotationsLock.Unlock() + + if record.annotations == nil { + record.annotations = make(map[string]annotation) + } + annotation := annotation{level: level, value: value} + if v, ok := record.annotations[key]; ok && v != annotation { + return fmt.Errorf("admission annotations are not allowd to be overwritten, key:%q, old value: %v, new value: %v", key, record.annotations[key], annotation) + } + record.annotations[key] = annotation + return nil +} + +func (record *attributesRecord) GetReinvocationContext() ReinvocationContext { + return record.reinvocationContext +} + +type reinvocationContext struct { + // isReinvoke is true when admission plugins are being reinvoked + isReinvoke bool + // reinvokeRequested is true when an admission plugin requested a re-invocation of the chain + reinvokeRequested bool + // values stores reinvoke context values per plugin. + values map[string]interface{} +} + +func (rc *reinvocationContext) IsReinvoke() bool { + return rc.isReinvoke +} + +func (rc *reinvocationContext) SetIsReinvoke() { + rc.isReinvoke = true +} + +func (rc *reinvocationContext) ShouldReinvoke() bool { + return rc.reinvokeRequested +} + +func (rc *reinvocationContext) SetShouldReinvoke() { + rc.reinvokeRequested = true +} + +func (rc *reinvocationContext) SetValue(plugin string, v interface{}) { + if rc.values == nil { + rc.values = map[string]interface{}{} + } + rc.values[plugin] = v +} + +func (rc *reinvocationContext) Value(plugin string) interface{} { + return rc.values[plugin] +} + +func checkKeyFormat(key string) error { + parts := strings.Split(key, "/") + if len(parts) != 2 { + return fmt.Errorf("annotation key has invalid format, the right format is a DNS subdomain prefix and '/' and key name. (e.g. 'podsecuritypolicy.admission.k8s.io/admit-policy')") + } + if msgs := validation.IsQualifiedName(key); len(msgs) != 0 { + return fmt.Errorf("annotation key has invalid format %s. A qualified name like 'podsecuritypolicy.admission.k8s.io/admit-policy' is required.", strings.Join(msgs, ",")) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/audit.go b/vendor/k8s.io/apiserver/pkg/admission/audit.go new file mode 100644 index 000000000..d1e103cfc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/audit.go @@ -0,0 +1,103 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "fmt" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +// auditHandler logs annotations set by other admission handlers +type auditHandler struct { + Interface + ae *auditinternal.Event +} + +var _ Interface = &auditHandler{} +var _ MutationInterface = &auditHandler{} +var _ ValidationInterface = &auditHandler{} + +// WithAudit is a decorator for a admission phase. It saves annotations +// of attribute into the audit event. Attributes passed to the Admit and +// Validate function must be instance of privateAnnotationsGetter or +// AnnotationsGetter, otherwise an error is returned. +func WithAudit(i Interface, ae *auditinternal.Event) Interface { + if i == nil { + return i + } + return &auditHandler{i, ae} +} + +func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if !handler.Interface.Handles(a.GetOperation()) { + return nil + } + if err := ensureAnnotationGetter(a); err != nil { + return err + } + var err error + if mutator, ok := handler.Interface.(MutationInterface); ok { + err = mutator.Admit(ctx, a, o) + handler.logAnnotations(a) + } + return err +} + +func (handler auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if !handler.Interface.Handles(a.GetOperation()) { + return nil + } + if err := ensureAnnotationGetter(a); err != nil { + return err + } + var err error + if validator, ok := handler.Interface.(ValidationInterface); ok { + err = validator.Validate(ctx, a, o) + handler.logAnnotations(a) + } + return err +} + +func ensureAnnotationGetter(a Attributes) error { + _, okPrivate := a.(privateAnnotationsGetter) + _, okPublic := a.(AnnotationsGetter) + if okPrivate || okPublic { + return nil + } + return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter") +} + +func (handler auditHandler) logAnnotations(a Attributes) { + if handler.ae == nil { + return + } + switch a := a.(type) { + case privateAnnotationsGetter: + for key, value := range a.getAnnotations(handler.ae.Level) { + audit.LogAnnotation(handler.ae, key, value) + } + case AnnotationsGetter: + for key, value := range a.GetAnnotations(handler.ae.Level) { + audit.LogAnnotation(handler.ae, key, value) + } + default: + // this will never happen, because we have already checked it in ensureAnnotationGetter + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/chain.go b/vendor/k8s.io/apiserver/pkg/admission/chain.go new file mode 100644 index 000000000..f2af01ef3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/chain.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "context" + +// chainAdmissionHandler is an instance of admission.NamedHandler that performs admission control using +// a chain of admission handlers +type chainAdmissionHandler []Interface + +// NewChainHandler creates a new chain handler from an array of handlers. Used for testing. +func NewChainHandler(handlers ...Interface) chainAdmissionHandler { + return chainAdmissionHandler(handlers) +} + +// Admit performs an admission control check using a chain of handlers, and returns immediately on first error +func (admissionHandler chainAdmissionHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + for _, handler := range admissionHandler { + if !handler.Handles(a.GetOperation()) { + continue + } + if mutator, ok := handler.(MutationInterface); ok { + err := mutator.Admit(ctx, a, o) + if err != nil { + return err + } + } + } + return nil +} + +// Validate performs an admission control check using a chain of handlers, and returns immediately on first error +func (admissionHandler chainAdmissionHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + for _, handler := range admissionHandler { + if !handler.Handles(a.GetOperation()) { + continue + } + if validator, ok := handler.(ValidationInterface); ok { + err := validator.Validate(ctx, a, o) + if err != nil { + return err + } + } + } + return nil +} + +// Handles will return true if any of the handlers handles the given operation +func (admissionHandler chainAdmissionHandler) Handles(operation Operation) bool { + for _, handler := range admissionHandler { + if handler.Handles(operation) { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go new file mode 100644 index 000000000..43613321b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -0,0 +1,175 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + + "k8s.io/klog/v2" + "sigs.k8s.io/yaml" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/apis/apiserver" + apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" +) + +func makeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 || base == "." 
{ + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} + +// ReadAdmissionConfiguration reads the admission configuration at the specified path. +// It returns the loaded admission configuration if the input file aligns with the required syntax. +// If it does not align with the provided syntax, it returns a default configuration for the enumerated +// set of pluginNames whose config location references the specified configFilePath. +// It does this to preserve backward compatibility when admission control files were opaque. +// It returns an error if the file did not exist. +func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, configScheme *runtime.Scheme) (ConfigProvider, error) { + if configFilePath == "" { + return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil + } + // a file was provided, so we just read it. + data, err := ioutil.ReadFile(configFilePath) + if err != nil { + return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) + } + codecs := serializer.NewCodecFactory(configScheme) + decoder := codecs.UniversalDecoder() + decodedObj, err := runtime.Decode(decoder, data) + // we were able to decode the file successfully + if err == nil { + decodedConfig, ok := decodedObj.(*apiserver.AdmissionConfiguration) + if !ok { + return nil, fmt.Errorf("unexpected type: %T", decodedObj) + } + baseDir := path.Dir(configFilePath) + for i := range decodedConfig.Plugins { + if decodedConfig.Plugins[i].Path == "" { + continue + } + // we update relative file paths to absolute paths + absPath, err := makeAbs(decodedConfig.Plugins[i].Path, baseDir) + if err != nil { + return nil, err + } + decodedConfig.Plugins[i].Path = absPath + } + return configProvider{ + config: decodedConfig, + }, nil + } + // we got an error where the decode wasn't related to a missing type + if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) { + return nil, err + } + + // Only tolerate load errors if the file appears to be one of the two legacy plugin configs + unstructuredData := map[string]interface{}{} + if err2 := yaml.Unmarshal(data, &unstructuredData); err2 != nil { + return nil, err + } + _, isLegacyImagePolicy := unstructuredData["imagePolicy"] + _, isLegacyPodNodeSelector := unstructuredData["podNodeSelectorPluginConfig"] + if !isLegacyImagePolicy && !isLegacyPodNodeSelector { + return nil, err + } + + // convert the legacy format to the new admission control format + // in order to preserve backwards compatibility, we set plugins that + // previously read input from a non-versioned file configuration to the + // current input file. 
+ legacyPluginsWithUnversionedConfig := sets.NewString("ImagePolicyWebhook", "PodNodeSelector") + externalConfig := &apiserverv1.AdmissionConfiguration{} + for _, pluginName := range pluginNames { + if legacyPluginsWithUnversionedConfig.Has(pluginName) { + externalConfig.Plugins = append(externalConfig.Plugins, + apiserverv1.AdmissionPluginConfiguration{ + Name: pluginName, + Path: configFilePath}) + } + } + configScheme.Default(externalConfig) + internalConfig := &apiserver.AdmissionConfiguration{} + if err := configScheme.Convert(externalConfig, internalConfig, nil); err != nil { + return nil, err + } + return configProvider{ + config: internalConfig, + }, nil +} + +type configProvider struct { + config *apiserver.AdmissionConfiguration +} + +// GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. +func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { + // if there is a nest object, return it directly + if pluginCfg.Configuration != nil { + return bytes.NewBuffer(pluginCfg.Configuration.Raw), nil + } + // there is nothing nested, so we delegate to path + if pluginCfg.Path != "" { + content, err := ioutil.ReadFile(pluginCfg.Path) + if err != nil { + klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) + return nil, err + } + return bytes.NewBuffer(content), nil + } + // there is no special config at all + return nil, nil +} + +// ConfigFor returns a reader for the specified plugin. +// If no specific configuration is present, we return a nil reader. +func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { + // there is no config, so there is no potential config + if p.config == nil { + return nil, nil + } + // look for matching plugin and get configuration + for _, pluginCfg := range p.config.Plugins { + if pluginName != pluginCfg.Name { + continue + } + pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) + if err != nil { + return nil, err + } + return pluginConfig, nil + } + // there is no registered config that matches on plugin name. + return nil, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go new file mode 100644 index 000000000..4c4bf74c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go @@ -0,0 +1,166 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configuration + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + defaultInterval = 1 * time.Second + defaultFailureThreshold = 5 + defaultBootstrapRetries = 5 + defaultBootstrapGraceperiod = 5 * time.Second +) + +var ( + ErrNotReady = fmt.Errorf("configuration is not ready") + ErrDisabled = fmt.Errorf("disabled") +) + +type getFunc func() (runtime.Object, error) + +// When running, poller calls `get` every `interval`. If `get` is +// successful, `Ready()` returns ready and `configuration()` returns the +// `mergedConfiguration`; if `get` has failed more than `failureThreshold ` times, +// `Ready()` returns not ready and `configuration()` returns nil configuration. +// In an HA setup, the poller is consistent only if the `get` is +// doing consistent read. +type poller struct { + // a function to consistently read the latest configuration + get getFunc + // consistent read interval + // read-only + interval time.Duration + // if the number of consecutive read failure equals or exceeds the failureThreshold , the + // configuration is regarded as not ready. + // read-only + failureThreshold int + // number of consecutive failures so far. + failures int + // If the poller has passed the bootstrap phase. The poller is considered + // bootstrapped either bootstrapGracePeriod after the first call of + // configuration(), or when setConfigurationAndReady() is called, whichever + // comes first. + bootstrapped bool + // configuration() retries bootstrapRetries times if poller is not bootstrapped + // read-only + bootstrapRetries int + // Grace period for bootstrapping + // read-only + bootstrapGracePeriod time.Duration + once sync.Once + // if the configuration is regarded as ready. + ready bool + mergedConfiguration runtime.Object + lastErr error + // lock must be hold when reading/writing the data fields of poller. + lock sync.RWMutex +} + +func newPoller(get getFunc) *poller { + p := poller{ + get: get, + interval: defaultInterval, + failureThreshold: defaultFailureThreshold, + bootstrapRetries: defaultBootstrapRetries, + bootstrapGracePeriod: defaultBootstrapGraceperiod, + } + return &p +} + +func (a *poller) lastError(err error) { + a.lock.Lock() + defer a.lock.Unlock() + a.lastErr = err +} + +func (a *poller) notReady() { + a.lock.Lock() + defer a.lock.Unlock() + a.ready = false +} + +func (a *poller) bootstrapping() { + // bootstrapGracePeriod is read-only, so no lock is required + timer := time.NewTimer(a.bootstrapGracePeriod) + go func() { + defer timer.Stop() + <-timer.C + a.lock.Lock() + defer a.lock.Unlock() + a.bootstrapped = true + }() +} + +// If the poller is not bootstrapped yet, the configuration() gets a few chances +// to retry. This hides transient failures during system startup. 
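A rough, test-style sketch of how the pieces of this file fit together: Run drives sync(), sync() stores the result via setConfigurationAndReady, and configuration() then serves it. Everything here is illustrative and in-package, since the poller type is unexported; demoPoller, the fake getFunc, and the ConfigMap stand-in are not part of the vendored code.

```go
package configuration

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// demoPoller is a hypothetical example of the poller lifecycle.
func demoPoller() {
	// The real managers pass a consistent read of webhook configurations here.
	fake := func() (runtime.Object, error) {
		return &corev1.ConfigMap{}, nil
	}

	p := newPoller(fake)
	stop := make(chan struct{})
	defer close(stop)
	p.Run(stop)

	// Give sync() at least one tick, then read the merged configuration.
	time.Sleep(2 * defaultInterval)
	obj, err := p.configuration()
	fmt.Println(obj != nil, err)
}
```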
+func (a *poller) configuration() (runtime.Object, error) { + a.once.Do(a.bootstrapping) + a.lock.RLock() + defer a.lock.RUnlock() + retries := 1 + if !a.bootstrapped { + retries = a.bootstrapRetries + } + for count := 0; count < retries; count++ { + if count > 0 { + a.lock.RUnlock() + time.Sleep(a.interval) + a.lock.RLock() + } + if a.ready { + return a.mergedConfiguration, nil + } + } + if a.lastErr != nil { + return nil, a.lastErr + } + return nil, ErrNotReady +} + +func (a *poller) setConfigurationAndReady(value runtime.Object) { + a.lock.Lock() + defer a.lock.Unlock() + a.bootstrapped = true + a.mergedConfiguration = value + a.ready = true + a.lastErr = nil +} + +func (a *poller) Run(stopCh <-chan struct{}) { + go wait.Until(a.sync, a.interval, stopCh) +} + +func (a *poller) sync() { + configuration, err := a.get() + if err != nil { + a.failures++ + a.lastError(err) + if a.failures >= a.failureThreshold { + a.notReady() + } + return + } + a.failures = 0 + a.setConfigurationAndReady(configuration) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go new file mode 100644 index 000000000..d9b28ad78 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "sort" + "sync/atomic" + + "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + "k8s.io/client-go/informers" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + "k8s.io/client-go/tools/cache" +) + +// mutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. 
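A minimal sketch of how the manager defined below is usually wired to a shared informer factory (illustrative only; the kubeconfig path and resync period are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/admission/configuration"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical client setup; any rest.Config source works.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	source := configuration.NewMutatingWebhookConfigurationManager(factory)

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Once synced, Webhooks() returns the merged, name-sorted accessors.
	fmt.Println("synced:", source.HasSynced(), "webhooks:", len(source.Webhooks()))
}
```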
+type mutatingWebhookConfigurationManager struct { + configuration *atomic.Value + lister admissionregistrationlisters.MutatingWebhookConfigurationLister + hasSynced func() bool +} + +var _ generic.Source = &mutatingWebhookConfigurationManager{} + +func NewMutatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { + informer := f.Admissionregistration().V1().MutatingWebhookConfigurations() + manager := &mutatingWebhookConfigurationManager{ + configuration: &atomic.Value{}, + lister: informer.Lister(), + hasSynced: informer.Informer().HasSynced, + } + + // Start with an empty list + manager.configuration.Store([]webhook.WebhookAccessor{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager +} + +// Webhooks returns the merged MutatingWebhookConfiguration. +func (m *mutatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { + return m.configuration.Load().([]webhook.WebhookAccessor) +} + +func (m *mutatingWebhookConfigurationManager) HasSynced() bool { + return m.hasSynced() +} + +func (m *mutatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := m.lister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return + } + m.configuration.Store(mergeMutatingWebhookConfigurations(configurations)) +} + +func mergeMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor { + // The internal order of webhooks for each configuration is provided by the user + // but configurations themselves can be in any order. As we are going to run these + // webhooks in serial, they are sorted here to have a deterministic order. + sort.SliceStable(configurations, MutatingWebhookConfigurationSorter(configurations).ByName) + accessors := []webhook.WebhookAccessor{} + for _, c := range configurations { + // webhook names are not validated for uniqueness, so we check for duplicates and + // add a int suffix to distinguish between them + names := map[string]int{} + for i := range c.Webhooks { + n := c.Webhooks[i].Name + uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n]) + names[n]++ + accessors = append(accessors, webhook.NewMutatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])) + } + } + return accessors +} + +type MutatingWebhookConfigurationSorter []*v1.MutatingWebhookConfiguration + +func (a MutatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go new file mode 100644 index 000000000..37062b082 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "sort" + "sync/atomic" + + "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + "k8s.io/client-go/informers" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + "k8s.io/client-go/tools/cache" +) + +// validatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. +type validatingWebhookConfigurationManager struct { + configuration *atomic.Value + lister admissionregistrationlisters.ValidatingWebhookConfigurationLister + hasSynced func() bool +} + +var _ generic.Source = &validatingWebhookConfigurationManager{} + +func NewValidatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source { + informer := f.Admissionregistration().V1().ValidatingWebhookConfigurations() + manager := &validatingWebhookConfigurationManager{ + configuration: &atomic.Value{}, + lister: informer.Lister(), + hasSynced: informer.Informer().HasSynced, + } + + // Start with an empty list + manager.configuration.Store([]webhook.WebhookAccessor{}) + + // On any change, rebuild the config + informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { manager.updateConfiguration() }, + UpdateFunc: func(_, _ interface{}) { manager.updateConfiguration() }, + DeleteFunc: func(_ interface{}) { manager.updateConfiguration() }, + }) + + return manager +} + +// Webhooks returns the merged ValidatingWebhookConfiguration. +func (v *validatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAccessor { + return v.configuration.Load().([]webhook.WebhookAccessor) +} + +// HasSynced returns true if the shared informers have synced. 
+func (v *validatingWebhookConfigurationManager) HasSynced() bool { + return v.hasSynced() +} + +func (v *validatingWebhookConfigurationManager) updateConfiguration() { + configurations, err := v.lister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err)) + return + } + v.configuration.Store(mergeValidatingWebhookConfigurations(configurations)) +} + +func mergeValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor { + sort.SliceStable(configurations, ValidatingWebhookConfigurationSorter(configurations).ByName) + accessors := []webhook.WebhookAccessor{} + for _, c := range configurations { + // webhook names are not validated for uniqueness, so we check for duplicates and + // add a int suffix to distinguish between them + names := map[string]int{} + for i := range c.Webhooks { + n := c.Webhooks[i].Name + uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n]) + names[n]++ + accessors = append(accessors, webhook.NewValidatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])) + } + } + return accessors +} + +type ValidatingWebhookConfigurationSorter []*v1.ValidatingWebhookConfiguration + +func (a ValidatingWebhookConfigurationSorter) ByName(i, j int) bool { + return a[i].Name < a[j].Name +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/decorator.go b/vendor/k8s.io/apiserver/pkg/admission/decorator.go new file mode 100644 index 000000000..a4b0b28b5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/decorator.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +type Decorator interface { + Decorate(handler Interface, name string) Interface +} + +type DecoratorFunc func(handler Interface, name string) Interface + +func (d DecoratorFunc) Decorate(handler Interface, name string) Interface { + return d(handler, name) +} + +type Decorators []Decorator + +// Decorate applies the decorator in inside-out order, i.e. the first decorator in the slice is first applied to the given handler. +func (d Decorators) Decorate(handler Interface, name string) Interface { + result := handler + for _, d := range d { + result = d.Decorate(result, name) + } + + return result +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/errors.go b/vendor/k8s.io/apiserver/pkg/admission/errors.go new file mode 100644 index 000000000..9a069a2c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/errors.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +func extractResourceName(a Attributes) (name string, resource schema.GroupResource, err error) { + resource = a.GetResource().GroupResource() + + if len(a.GetName()) > 0 { + return a.GetName(), resource, nil + } + + name = "Unknown" + obj := a.GetObject() + if obj != nil { + accessor, err := meta.Accessor(obj) + if err != nil { + // not all object have ObjectMeta. If we don't, return a name with a slash (always illegal) + return "Unknown/errorGettingName", resource, nil + } + + // this is necessary because name object name generation has not occurred yet + if len(accessor.GetName()) > 0 { + name = accessor.GetName() + } else if len(accessor.GetGenerateName()) > 0 { + name = accessor.GetGenerateName() + } + } + return name, resource, nil +} + +// NewForbidden is a utility function to return a well-formatted admission control error response +func NewForbidden(a Attributes, internalError error) error { + // do not double wrap an error of same type + if apierrors.IsForbidden(internalError) { + return internalError + } + name, resource, err := extractResourceName(a) + if err != nil { + return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err})) + } + return apierrors.NewForbidden(resource, name, internalError) +} + +// NewNotFound is a utility function to return a well-formatted admission control error response +func NewNotFound(a Attributes) error { + name, resource, err := extractResourceName(a) + if err != nil { + return apierrors.NewInternalError(err) + } + return apierrors.NewNotFound(resource, name) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/handler.go b/vendor/k8s.io/apiserver/pkg/admission/handler.go new file mode 100644 index 000000000..d2a9e7d4c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/handler.go @@ -0,0 +1,79 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + // timeToWaitForReady is the amount of time to wait to let an admission controller to be ready to satisfy a request. + // this is useful when admission controllers need to warm their caches before letting requests through. + timeToWaitForReady = 10 * time.Second +) + +// ReadyFunc is a function that returns true if the admission controller is ready to handle requests. 
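A short sketch of how a plugin usually builds on the Handler base type defined below, registering a ReadyFunc so WaitForReady can block until informer caches are warm. The demoPlugin type and its behavior are hypothetical; the NamespaceLifecycle plugin later in this patch follows the same pattern.

```go
package demo

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/admission"
	"k8s.io/client-go/informers"
)

// demoPlugin is a hypothetical validating plugin built on admission.Handler.
type demoPlugin struct {
	*admission.Handler
}

var _ admission.ValidationInterface = &demoPlugin{}

func newDemoPlugin() *demoPlugin {
	// Handles() consults the operation set passed to NewHandler.
	return &demoPlugin{Handler: admission.NewHandler(admission.Create, admission.Update)}
}

// SetExternalKubeInformerFactory ties readiness to informer cache sync, so
// WaitForReady blocks (bounded by timeToWaitForReady) until caches are warm.
func (d *demoPlugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
	d.SetReadyFunc(f.Core().V1().Namespaces().Informer().HasSynced)
}

func (d *demoPlugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
	if !d.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}
	return nil // accept everything in this sketch
}
```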
+type ReadyFunc func() bool + +// Handler is a base for admission control handlers that +// support a predefined set of operations +type Handler struct { + operations sets.String + readyFunc ReadyFunc +} + +// Handles returns true for methods that this handler supports +func (h *Handler) Handles(operation Operation) bool { + return h.operations.Has(string(operation)) +} + +// NewHandler creates a new base handler that handles the passed +// in operations +func NewHandler(ops ...Operation) *Handler { + operations := sets.NewString() + for _, op := range ops { + operations.Insert(string(op)) + } + return &Handler{ + operations: operations, + } +} + +// SetReadyFunc allows late registration of a ReadyFunc to know if the handler is ready to process requests. +func (h *Handler) SetReadyFunc(readyFunc ReadyFunc) { + h.readyFunc = readyFunc +} + +// WaitForReady will wait for the readyFunc (if registered) to return ready, and in case of timeout, will return false. +func (h *Handler) WaitForReady() bool { + // there is no ready func configured, so we return immediately + if h.readyFunc == nil { + return true + } + + timeout := time.After(timeToWaitForReady) + for !h.readyFunc() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return h.readyFunc() + } + } + return true +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go new file mode 100644 index 000000000..613baf8ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +type pluginInitializer struct { + externalClient kubernetes.Interface + externalInformers informers.SharedInformerFactory + authorizer authorizer.Authorizer + featureGates featuregate.FeatureGate +} + +// New creates an instance of admission plugins initializer. +// This constructor is public with a long param list so that callers immediately know that new information can be expected +// during compilation when they update a level. 
+func New( + extClientset kubernetes.Interface, + extInformers informers.SharedInformerFactory, + authz authorizer.Authorizer, + featureGates featuregate.FeatureGate, +) pluginInitializer { + return pluginInitializer{ + externalClient: extClientset, + externalInformers: extInformers, + authorizer: authz, + featureGates: featureGates, + } +} + +// Initialize checks the initialization interfaces implemented by a plugin +// and provide the appropriate initialization data +func (i pluginInitializer) Initialize(plugin admission.Interface) { + // First tell the plugin about enabled features, so it can decide whether to start informers or not + if wants, ok := plugin.(WantsFeatures); ok { + wants.InspectFeatureGates(i.featureGates) + } + + if wants, ok := plugin.(WantsExternalKubeClientSet); ok { + wants.SetExternalKubeClientSet(i.externalClient) + } + + if wants, ok := plugin.(WantsExternalKubeInformerFactory); ok { + wants.SetExternalKubeInformerFactory(i.externalInformers) + } + + if wants, ok := plugin.(WantsAuthorizer); ok { + wants.SetAuthorizer(i.authorizer) + } +} + +var _ admission.PluginInitializer = pluginInitializer{} diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go new file mode 100644 index 000000000..86a6df1c2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + quota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +// WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it +type WantsExternalKubeClientSet interface { + SetExternalKubeClientSet(kubernetes.Interface) + admission.InitializationValidator +} + +// WantsExternalKubeInformerFactory defines a function which sets InformerFactory for admission plugins that need it +type WantsExternalKubeInformerFactory interface { + SetExternalKubeInformerFactory(informers.SharedInformerFactory) + admission.InitializationValidator +} + +// WantsAuthorizer defines a function which sets Authorizer for admission plugins that need it. +type WantsAuthorizer interface { + SetAuthorizer(authorizer.Authorizer) + admission.InitializationValidator +} + +// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it. +type WantsQuotaConfiguration interface { + SetQuotaConfiguration(quota.Configuration) + admission.InitializationValidator +} + +// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin. +// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one +// and assign it to a simple bool in the admission plugin struct. 
+// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ +// a.myFeatureIsOn = features.Enabled("my-feature") +// } +type WantsFeatures interface { + InspectFeatureGates(featuregate.FeatureGate) + admission.InitializationValidator +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go new file mode 100644 index 000000000..8882680b2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go @@ -0,0 +1,172 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" +) + +// Attributes is an interface used by AdmissionController to get information about a request +// that is used to make an admission decision. +type Attributes interface { + // GetName returns the name of the object as presented in the request. On a CREATE operation, the client + // may omit name and rely on the server to generate the name. If that is the case, this method will return + // the empty string + GetName() string + // GetNamespace is the namespace associated with the request (if any) + GetNamespace() string + // GetResource is the name of the resource being requested. This is not the kind. For example: pods + GetResource() schema.GroupVersionResource + // GetSubresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + GetSubresource() string + // GetOperation is the operation being performed + GetOperation() Operation + // GetOperationOptions is the options for the operation being performed + GetOperationOptions() runtime.Object + // IsDryRun indicates that modifications will definitely not be persisted for this request. This is to prevent + // admission controllers with side effects and a method of reconciliation from being overwhelmed. + // However, a value of false for this does not mean that the modification will be persisted, because it + // could still be rejected by a subsequent validation step. + IsDryRun() bool + // GetObject is the object from the incoming request prior to default values being applied + GetObject() runtime.Object + // GetOldObject is the existing object. Only populated for UPDATE requests. + GetOldObject() runtime.Object + // GetKind is the type of object being manipulated. 
For example: Pod + GetKind() schema.GroupVersionKind + // GetUserInfo is information about the requesting user + GetUserInfo() user.Info + + // AddAnnotation sets annotation according to key-value pair. The key should be qualified, e.g., podsecuritypolicy.admission.k8s.io/admit-policy, where + // "podsecuritypolicy" is the name of the plugin, "admission.k8s.io" is the name of the organization, "admit-policy" is the key name. + // An error is returned if the format of key is invalid. When trying to overwrite annotation with a new value, an error is returned. + // Both ValidationInterface and MutationInterface are allowed to add Annotations. + // By default, an annotation gets logged into audit event if the request's audit level is greater or + // equal to Metadata. + AddAnnotation(key, value string) error + + // AddAnnotationWithLevel sets annotation according to key-value pair with additional intended audit level. + // An Annotation gets logged into audit event if the request's audit level is greater or equal to the + // intended audit level. + AddAnnotationWithLevel(key, value string, level auditinternal.Level) error + + // GetReinvocationContext tracks the admission request information relevant to the re-invocation policy. + GetReinvocationContext() ReinvocationContext +} + +// ObjectInterfaces is an interface used by AdmissionController to get object interfaces +// such as Converter or Defaulter. These interfaces are normally coming from Request Scope +// to handle special cases like CRDs. +type ObjectInterfaces interface { + // GetObjectCreater is the ObjectCreator appropriate for the requested object. + GetObjectCreater() runtime.ObjectCreater + // GetObjectTyper is the ObjectTyper appropriate for the requested object. + GetObjectTyper() runtime.ObjectTyper + // GetObjectDefaulter is the ObjectDefaulter appropriate for the requested object. + GetObjectDefaulter() runtime.ObjectDefaulter + // GetObjectConvertor is the ObjectConvertor appropriate for the requested object. + GetObjectConvertor() runtime.ObjectConvertor + // GetEquivalentResourceMapper is the EquivalentResourceMapper appropriate for finding equivalent resources and expected kind for the requested object. + GetEquivalentResourceMapper() runtime.EquivalentResourceMapper +} + +// privateAnnotationsGetter is a private interface which allows users to get annotations from Attributes. +type privateAnnotationsGetter interface { + getAnnotations(maxLevel auditinternal.Level) map[string]string +} + +// AnnotationsGetter allows users to get annotations from Attributes. An alternate Attribute should implement +// this interface. +type AnnotationsGetter interface { + GetAnnotations(maxLevel auditinternal.Level) map[string]string +} + +// ReinvocationContext provides access to the admission related state required to implement the re-invocation policy. +type ReinvocationContext interface { + // IsReinvoke returns true if the current admission check is a re-invocation. + IsReinvoke() bool + // SetIsReinvoke sets the current admission check as a re-invocation. + SetIsReinvoke() + // ShouldReinvoke returns true if any plugin has requested a re-invocation. + ShouldReinvoke() bool + // SetShouldReinvoke signals that a re-invocation is desired. + SetShouldReinvoke() + // AddValue set a value for a plugin name, possibly overriding a previous value. + SetValue(plugin string, v interface{}) + // Value reads a value for a webhook. 
+ Value(plugin string) interface{} +} + +// Interface is an abstract, pluggable interface for Admission Control decisions. +type Interface interface { + // Handles returns true if this admission controller can handle the given operation + // where operation can be one of CREATE, UPDATE, DELETE, or CONNECT + Handles(operation Operation) bool +} + +type MutationInterface interface { + Interface + + // Admit makes an admission decision based on the request attributes. + // Context is used only for timeout/deadline/cancellation and tracing information. + Admit(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) +} + +// ValidationInterface is an abstract, pluggable interface for Admission Control decisions. +type ValidationInterface interface { + Interface + + // Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate + // Context is used only for timeout/deadline/cancellation and tracing information. + Validate(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) +} + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) + +// PluginInitializer is used for initialization of shareable resources between admission plugins. +// After initialization the resources have to be set separately +type PluginInitializer interface { + Initialize(plugin Interface) +} + +// InitializationValidator holds ValidateInitialization functions, which are responsible for validation of initialized +// shared resources and should be implemented on admission plugins +type InitializationValidator interface { + ValidateInitialization() error +} + +// ConfigProvider provides a way to get configuration for an admission plugin based on its name +type ConfigProvider interface { + ConfigFor(pluginName string) (io.Reader, error) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go new file mode 100644 index 000000000..c9edb48b4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -0,0 +1,251 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "fmt" + "strconv" + "time" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// WebhookRejectionErrorType defines different error types that happen in a webhook rejection. 
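Complementing the interfaces above: a mutating plugin implements MutationInterface and typically both edits the incoming object and records what it did via AddAnnotation. The labeler type and its label and annotation keys below are made up for illustration.

```go
package demo

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/admission"
)

// labeler is a hypothetical mutating plugin: it stamps a label on every object
// whose metadata it can read.
type labeler struct {
	*admission.Handler
}

var _ admission.MutationInterface = &labeler{}

func newLabeler() *labeler {
	return &labeler{Handler: admission.NewHandler(admission.Create)}
}

func (l *labeler) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
	obj, ok := a.GetObject().(metav1.Object)
	if !ok {
		return nil // nothing to label
	}
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels["example.k8s.io/touched"] = "true"
	obj.SetLabels(labels)
	// Record the decision on the audit event (Metadata level by default).
	return a.AddAnnotation("labeler.example.k8s.io/added-label", "true")
}
```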
+type WebhookRejectionErrorType string + +const ( + namespace = "apiserver" + subsystem = "admission" + + // WebhookRejectionCallingWebhookError identifies a calling webhook error which causes + // a webhook admission to reject a request + WebhookRejectionCallingWebhookError WebhookRejectionErrorType = "calling_webhook_error" + // WebhookRejectionAPIServerInternalError identifies an apiserver internal error which + // causes a webhook admission to reject a request + WebhookRejectionAPIServerInternalError WebhookRejectionErrorType = "apiserver_internal_error" + // WebhookRejectionNoError identifies a webhook properly rejected a request + WebhookRejectionNoError WebhookRejectionErrorType = "no_error" +) + +var ( + // Use buckets ranging from 5 ms to 2.5 seconds (admission webhooks timeout at 30 seconds by default). + latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5} + latencySummaryMaxAge = 5 * time.Hour + + // Metrics provides access to all admission metrics. + Metrics = newAdmissionMetrics() +) + +// ObserverFunc is a func that emits metrics. +type ObserverFunc func(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) + +const ( + stepValidate = "validate" + stepAdmit = "admit" +) + +// WithControllerMetrics is a decorator for named admission handlers. +func WithControllerMetrics(i admission.Interface, name string) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionController, name) +} + +// WithStepMetrics is a decorator for a whole admission phase, i.e. admit or validation.admission step. +func WithStepMetrics(i admission.Interface) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionStep) +} + +// WithMetrics is a decorator for admission handlers with a generic observer func. +func WithMetrics(i admission.Interface, observer ObserverFunc, extraLabels ...string) admission.Interface { + return &pluginHandlerWithMetrics{ + Interface: i, + observer: observer, + extraLabels: extraLabels, + } +} + +// pluginHandlerWithMetrics decorates a admission handler with metrics. +type pluginHandlerWithMetrics struct { + admission.Interface + observer ObserverFunc + extraLabels []string +} + +// Admit performs a mutating admission control check and emit metrics. +func (p pluginHandlerWithMetrics) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + mutatingHandler, ok := p.Interface.(admission.MutationInterface) + if !ok { + return nil + } + + start := time.Now() + err := mutatingHandler.Admit(ctx, a, o) + p.observer(time.Since(start), err != nil, a, stepAdmit, p.extraLabels...) + return err +} + +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithMetrics) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + validatingHandler, ok := p.Interface.(admission.ValidationInterface) + if !ok { + return nil + } + + start := time.Now() + err := validatingHandler.Validate(ctx, a, o) + p.observer(time.Since(start), err != nil, a, stepValidate, p.extraLabels...) + return err +} + +// AdmissionMetrics instruments admission with prometheus metrics. +type AdmissionMetrics struct { + step *metricSet + controller *metricSet + webhook *metricSet + webhookRejection *metrics.CounterVec +} + +// newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names. +func newAdmissionMetrics() *AdmissionMetrics { + // Admission metrics for a step of the admission flow. 
The entire admission flow is broken down into a series of steps.
+	// Each step is identified by a distinct type label value.
+	step := newMetricSet("step",
+		[]string{"type", "operation", "rejected"},
+		"Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true)
+
+	// Built-in admission controller metrics. Each admission controller is identified by name.
+	controller := newMetricSet("controller",
+		[]string{"name", "type", "operation", "rejected"},
+		"Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false)
+
+	// Admission webhook metrics. Each webhook is identified by name.
+	webhook := newMetricSet("webhook",
+		[]string{"name", "type", "operation", "rejected"},
+		"Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false)
+
+	webhookRejection := metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "webhook_rejection_count",
+			Help:           "Admission webhook rejection count, identified by name and broken out for each admission type (validating or admit) and operation. Additional labels specify an error type (calling_webhook_error or apiserver_internal_error if an error occurred; no_error otherwise) and optionally a non-zero rejection code if the webhook rejects the request with an HTTP status code (honored by the apiserver when the code is greater or equal to 400). Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"name", "type", "operation", "error_type", "rejection_code"})
+
+	step.mustRegister()
+	controller.mustRegister()
+	webhook.mustRegister()
+	legacyregistry.MustRegister(webhookRejection)
+	return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection}
+}
+
+func (m *AdmissionMetrics) reset() {
+	m.step.reset()
+	m.controller.reset()
+	m.webhook.reset()
+}
+
+// ObserveAdmissionStep records admission related metrics for an admission step, identified by step type.
+func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
+	m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...)
+}
+
+// ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by its plugin handler name.
+func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
+	m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...)
+}
+
+// ObserveWebhook records admission related metrics for an admission webhook.
+func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
+	m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...)
+}
+
+// ObserveWebhookRejection records admission related metrics for an admission webhook rejection.
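A sketch of how the exported decorators above are meant to be layered: controller-level metrics around each named plugin, step-level metrics around the whole chain. NewChainHandler is documented earlier in this patch as a test helper, so this only shows the shape of the wiring; the instrument helper is hypothetical.

```go
package demo

import (
	"k8s.io/apiserver/pkg/admission"
	admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
)

// instrument wraps a single plugin with per-controller metrics and then the
// resulting chain with per-step metrics, roughly mirroring how the apiserver
// assembles its admission chain.
func instrument(plugin admission.Interface, name string) admission.Interface {
	withController := admissionmetrics.WithControllerMetrics(plugin, name)
	chain := admission.NewChainHandler(withController)
	return admissionmetrics.WithStepMetrics(chain)
}
```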
+func (m *AdmissionMetrics) ObserveWebhookRejection(name, stepType, operation string, errorType WebhookRejectionErrorType, rejectionCode int) { + // We truncate codes greater than 600 to keep the cardinality bounded. + // This should be rarely done by a malfunctioning webhook server. + if rejectionCode > 600 { + rejectionCode = 600 + } + m.webhookRejection.WithLabelValues(name, stepType, operation, string(errorType), strconv.Itoa(rejectionCode)).Inc() +} + +type metricSet struct { + latencies *metrics.HistogramVec + latenciesSummary *metrics.SummaryVec +} + +func newMetricSet(name string, labels []string, helpTemplate string, hasSummary bool) *metricSet { + var summary *metrics.SummaryVec + if hasSummary { + summary = metrics.NewSummaryVec( + &metrics.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_duration_seconds_summary", name), + Help: fmt.Sprintf(helpTemplate, "latency summary in seconds"), + MaxAge: latencySummaryMaxAge, + StabilityLevel: metrics.ALPHA, + }, + labels, + ) + } + + return &metricSet{ + latencies: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_duration_seconds", name), + Help: fmt.Sprintf(helpTemplate, "latency histogram in seconds"), + Buckets: latencyBuckets, + StabilityLevel: metrics.ALPHA, + }, + labels, + ), + + latenciesSummary: summary, + } +} + +// MustRegister registers all the prometheus metrics in the metricSet. +func (m *metricSet) mustRegister() { + legacyregistry.MustRegister(m.latencies) + if m.latenciesSummary != nil { + legacyregistry.MustRegister(m.latenciesSummary) + } +} + +// Reset resets all the prometheus metrics in the metricSet. +func (m *metricSet) reset() { + m.latencies.Reset() + if m.latenciesSummary != nil { + m.latenciesSummary.Reset() + } +} + +// Observe records an observed admission event to all metrics in the metricSet. +func (m *metricSet) observe(elapsed time.Duration, labels ...string) { + elapsedSeconds := elapsed.Seconds() + m.latencies.WithLabelValues(labels...).Observe(elapsedSeconds) + if m.latenciesSummary != nil { + m.latenciesSummary.WithLabelValues(labels...).Observe(elapsedSeconds) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go new file mode 100644 index 000000000..0fac569c4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -0,0 +1,232 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "context" + "fmt" + "io" + "time" + + "k8s.io/klog/v2" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilcache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "NamespaceLifecycle" + // how long a namespace stays in the force live lookup cache before expiration. + forceLiveLookupTTL = 30 * time.Second + // how long to wait for a missing namespace before re-checking the cache (and then doing a live lookup) + // this accomplishes two things: + // 1. It allows a watch-fed cache time to observe a namespace creation event + // 2. It allows time for a namespace creation to distribute to members of a storage cluster, + // so the live lookup has a better chance of succeeding even if it isn't performed against the leader. + missingNamespaceWait = 50 * time.Millisecond +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic)) + }) +} + +// Lifecycle is an implementation of admission.Interface. +// It enforces life-cycle constraints around a Namespace depending on its Phase +type Lifecycle struct { + *admission.Handler + client kubernetes.Interface + immortalNamespaces sets.String + namespaceLister corelisters.NamespaceLister + // forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache. + // if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server. + forceLiveLookupCache *utilcache.LRUExpireCache +} + +var _ = initializer.WantsExternalKubeInformerFactory(&Lifecycle{}) +var _ = initializer.WantsExternalKubeClientSet(&Lifecycle{}) + +// Admit makes an admission decision based on the request attributes +func (l *Lifecycle) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + // prevent deletion of immortal namespaces + if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() && l.immortalNamespaces.Has(a.GetName()) { + return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted")) + } + + // always allow non-namespaced resources + if len(a.GetNamespace()) == 0 && a.GetKind().GroupKind() != v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { + return nil + } + + if a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { + // if a namespace is deleted, we want to prevent all further creates into it + // while it is undergoing termination. to reduce incidences where the cache + // is slow to update, we add the namespace into a force live lookup list to ensure + // we are not looking at stale state. 
+ if a.GetOperation() == admission.Delete { + l.forceLiveLookupCache.Add(a.GetName(), true, forceLiveLookupTTL) + } + // allow all operations to namespaces + return nil + } + + // always allow deletion of other resources + if a.GetOperation() == admission.Delete { + return nil + } + + // always allow access review checks. Returning status about the namespace would be leaking information + if isAccessReview(a) { + return nil + } + + // we need to wait for our caches to warm + if !l.WaitForReady() { + return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) + } + + var ( + exists bool + err error + ) + + namespace, err := l.namespaceLister.Get(a.GetNamespace()) + if err != nil { + if !errors.IsNotFound(err) { + return errors.NewInternalError(err) + } + } else { + exists = true + } + + if !exists && a.GetOperation() == admission.Create { + // give the cache time to observe the namespace before rejecting a create. + // this helps when creating a namespace and immediately creating objects within it. + time.Sleep(missingNamespaceWait) + namespace, err = l.namespaceLister.Get(a.GetNamespace()) + switch { + case errors.IsNotFound(err): + // no-op + case err != nil: + return errors.NewInternalError(err) + default: + exists = true + } + if exists { + klog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) + } + } + + // forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server. + forceLiveLookup := false + if _, ok := l.forceLiveLookupCache.Get(a.GetNamespace()); ok { + // we think the namespace was marked for deletion, but our current local cache says otherwise, we will force a live lookup. + forceLiveLookup = exists && namespace.Status.Phase == v1.NamespaceActive + } + + // refuse to operate on non-existent namespaces + if !exists || forceLiveLookup { + // as a last resort, make a call directly to storage + namespace, err = l.client.CoreV1().Namespaces().Get(context.TODO(), a.GetNamespace(), metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + return err + case err != nil: + return errors.NewInternalError(err) + } + klog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) + } + + // ensure that we're not trying to create objects in terminating namespaces + if a.GetOperation() == admission.Create { + if namespace.Status.Phase != v1.NamespaceTerminating { + return nil + } + + err := admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace())) + if apierr, ok := err.(*errors.StatusError); ok { + apierr.ErrStatus.Details.Causes = append(apierr.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: v1.NamespaceTerminatingCause, + Message: fmt.Sprintf("namespace %s is being terminated", a.GetNamespace()), + Field: "metadata.namespace", + }) + } + return err + } + + return nil +} + +// NewLifecycle creates a new namespace Lifecycle admission control handler +func NewLifecycle(immortalNamespaces sets.String) (*Lifecycle, error) { + return newLifecycleWithClock(immortalNamespaces, clock.RealClock{}) +} + +func newLifecycleWithClock(immortalNamespaces sets.String, clock utilcache.Clock) (*Lifecycle, error) { + forceLiveLookupCache := utilcache.NewLRUExpireCacheWithClock(100, clock) + return &Lifecycle{ + Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), + immortalNamespaces: immortalNamespaces, + forceLiveLookupCache: forceLiveLookupCache, + }, nil +} + +// SetExternalKubeInformerFactory 
implements the WantsExternalKubeInformerFactory interface. +func (l *Lifecycle) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + namespaceInformer := f.Core().V1().Namespaces() + l.namespaceLister = namespaceInformer.Lister() + l.SetReadyFunc(namespaceInformer.Informer().HasSynced) +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. +func (l *Lifecycle) SetExternalKubeClientSet(client kubernetes.Interface) { + l.client = client +} + +// ValidateInitialization implements the InitializationValidator interface. +func (l *Lifecycle) ValidateInitialization() error { + if l.namespaceLister == nil { + return fmt.Errorf("missing namespaceLister") + } + if l.client == nil { + return fmt.Errorf("missing client") + } + return nil +} + +// accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these +// resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace. +var accessReviewResources = map[schema.GroupResource]bool{ + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, +} + +func isAccessReview(a admission.Attributes) bool { + return accessReviewResources[a.GetResource().GroupResource()] +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go new file mode 100644 index 000000000..0cbc21c82 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go @@ -0,0 +1,297 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "sync" + + "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/rest" +) + +// WebhookAccessor provides a common interface to both mutating and validating webhook types. +type WebhookAccessor interface { + // GetUID gets a string that uniquely identifies the webhook. + GetUID() string + + // GetConfigurationName gets the name of the webhook configuration that owns this webhook. + GetConfigurationName() string + + // GetRESTClient gets the webhook client + GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) + // GetParsedNamespaceSelector gets the webhook NamespaceSelector field. + GetParsedNamespaceSelector() (labels.Selector, error) + // GetParsedObjectSelector gets the webhook ObjectSelector field. + GetParsedObjectSelector() (labels.Selector, error) + + // GetName gets the webhook Name field. Note that the name is scoped to the webhook + // configuration and does not provide a globally unique identity, if a unique identity is + // needed, use GetUID. + GetName() string + // GetClientConfig gets the webhook ClientConfig field. + GetClientConfig() v1.WebhookClientConfig + // GetRules gets the webhook Rules field. 
+ GetRules() []v1.RuleWithOperations + // GetFailurePolicy gets the webhook FailurePolicy field. + GetFailurePolicy() *v1.FailurePolicyType + // GetMatchPolicy gets the webhook MatchPolicy field. + GetMatchPolicy() *v1.MatchPolicyType + // GetNamespaceSelector gets the webhook NamespaceSelector field. + GetNamespaceSelector() *metav1.LabelSelector + // GetObjectSelector gets the webhook ObjectSelector field. + GetObjectSelector() *metav1.LabelSelector + // GetSideEffects gets the webhook SideEffects field. + GetSideEffects() *v1.SideEffectClass + // GetTimeoutSeconds gets the webhook TimeoutSeconds field. + GetTimeoutSeconds() *int32 + // GetAdmissionReviewVersions gets the webhook AdmissionReviewVersions field. + GetAdmissionReviewVersions() []string + + // GetMutatingWebhook if the accessor contains a MutatingWebhook, returns it and true, else returns false. + GetMutatingWebhook() (*v1.MutatingWebhook, bool) + // GetValidatingWebhook if the accessor contains a ValidatingWebhook, returns it and true, else returns false. + GetValidatingWebhook() (*v1.ValidatingWebhook, bool) +} + +// NewMutatingWebhookAccessor creates an accessor for a MutatingWebhook. +func NewMutatingWebhookAccessor(uid, configurationName string, h *v1.MutatingWebhook) WebhookAccessor { + return &mutatingWebhookAccessor{uid: uid, configurationName: configurationName, MutatingWebhook: h} +} + +type mutatingWebhookAccessor struct { + *v1.MutatingWebhook + uid string + configurationName string + + initObjectSelector sync.Once + objectSelector labels.Selector + objectSelectorErr error + + initNamespaceSelector sync.Once + namespaceSelector labels.Selector + namespaceSelectorErr error + + initClient sync.Once + client *rest.RESTClient + clientErr error +} + +func (m *mutatingWebhookAccessor) GetUID() string { + return m.uid +} + +func (m *mutatingWebhookAccessor) GetConfigurationName() string { + return m.configurationName +} + +func (m *mutatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { + m.initClient.Do(func() { + m.client, m.clientErr = clientManager.HookClient(hookClientConfigForWebhook(m)) + }) + return m.client, m.clientErr +} + +func (m *mutatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { + m.initNamespaceSelector.Do(func() { + m.namespaceSelector, m.namespaceSelectorErr = metav1.LabelSelectorAsSelector(m.NamespaceSelector) + }) + return m.namespaceSelector, m.namespaceSelectorErr +} + +func (m *mutatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { + m.initObjectSelector.Do(func() { + m.objectSelector, m.objectSelectorErr = metav1.LabelSelectorAsSelector(m.ObjectSelector) + }) + return m.objectSelector, m.objectSelectorErr +} + +func (m *mutatingWebhookAccessor) GetName() string { + return m.Name +} + +func (m *mutatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { + return m.ClientConfig +} + +func (m *mutatingWebhookAccessor) GetRules() []v1.RuleWithOperations { + return m.Rules +} + +func (m *mutatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return m.FailurePolicy +} + +func (m *mutatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { + return m.MatchPolicy +} + +func (m *mutatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { + return m.NamespaceSelector +} + +func (m *mutatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { + return m.ObjectSelector +} + +func (m *mutatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { + return 
m.SideEffects +} + +func (m *mutatingWebhookAccessor) GetTimeoutSeconds() *int32 { + return m.TimeoutSeconds +} + +func (m *mutatingWebhookAccessor) GetAdmissionReviewVersions() []string { + return m.AdmissionReviewVersions +} + +func (m *mutatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { + return m.MutatingWebhook, true +} + +func (m *mutatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { + return nil, false +} + +// NewValidatingWebhookAccessor creates an accessor for a ValidatingWebhook. +func NewValidatingWebhookAccessor(uid, configurationName string, h *v1.ValidatingWebhook) WebhookAccessor { + return &validatingWebhookAccessor{uid: uid, configurationName: configurationName, ValidatingWebhook: h} +} + +type validatingWebhookAccessor struct { + *v1.ValidatingWebhook + uid string + configurationName string + + initObjectSelector sync.Once + objectSelector labels.Selector + objectSelectorErr error + + initNamespaceSelector sync.Once + namespaceSelector labels.Selector + namespaceSelectorErr error + + initClient sync.Once + client *rest.RESTClient + clientErr error +} + +func (v *validatingWebhookAccessor) GetUID() string { + return v.uid +} + +func (v *validatingWebhookAccessor) GetConfigurationName() string { + return v.configurationName +} + +func (v *validatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { + v.initClient.Do(func() { + v.client, v.clientErr = clientManager.HookClient(hookClientConfigForWebhook(v)) + }) + return v.client, v.clientErr +} + +func (v *validatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { + v.initNamespaceSelector.Do(func() { + v.namespaceSelector, v.namespaceSelectorErr = metav1.LabelSelectorAsSelector(v.NamespaceSelector) + }) + return v.namespaceSelector, v.namespaceSelectorErr +} + +func (v *validatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { + v.initObjectSelector.Do(func() { + v.objectSelector, v.objectSelectorErr = metav1.LabelSelectorAsSelector(v.ObjectSelector) + }) + return v.objectSelector, v.objectSelectorErr +} + +func (v *validatingWebhookAccessor) GetName() string { + return v.Name +} + +func (v *validatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { + return v.ClientConfig +} + +func (v *validatingWebhookAccessor) GetRules() []v1.RuleWithOperations { + return v.Rules +} + +func (v *validatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { + return v.FailurePolicy +} + +func (v *validatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { + return v.MatchPolicy +} + +func (v *validatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { + return v.NamespaceSelector +} + +func (v *validatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { + return v.ObjectSelector +} + +func (v *validatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { + return v.SideEffects +} + +func (v *validatingWebhookAccessor) GetTimeoutSeconds() *int32 { + return v.TimeoutSeconds +} + +func (v *validatingWebhookAccessor) GetAdmissionReviewVersions() []string { + return v.AdmissionReviewVersions +} + +func (v *validatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { + return nil, false +} + +func (v *validatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { + return v.ValidatingWebhook, true +} + +// hookClientConfigForWebhook construct a webhookutil.ClientConfig using a WebhookAccessor to access +// 
v1beta1.MutatingWebhook and v1beta1.ValidatingWebhook API objects. webhookutil.ClientConfig is used +// to create a HookClient and the purpose of the config struct is to share that with other packages +// that need to create a HookClient. +func hookClientConfigForWebhook(w WebhookAccessor) webhookutil.ClientConfig { + ret := webhookutil.ClientConfig{Name: w.GetName(), CABundle: w.GetClientConfig().CABundle} + if w.GetClientConfig().URL != nil { + ret.URL = *w.GetClientConfig().URL + } + if w.GetClientConfig().Service != nil { + ret.Service = &webhookutil.ClientConfigService{ + Name: w.GetClientConfig().Service.Name, + Namespace: w.GetClientConfig().Service.Namespace, + } + if w.GetClientConfig().Service.Port != nil { + ret.Service.Port = *w.GetClientConfig().Service.Port + } else { + ret.Service.Port = 443 + } + if w.GetClientConfig().Service.Path != nil { + ret.Service.Path = *w.GetClientConfig().Service.Path + } + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go new file mode 100644 index 000000000..63ab31039 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package webhookadmission diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go new file mode 100644 index 000000000..2f49b8976 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhookadmission + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go new file mode 100644 index 000000000..71ce47b1f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhookadmission + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go new file mode 100644 index 000000000..92cfed107 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go new file mode 100644 index 000000000..4a9c0a689 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go new file mode 100644 index 000000000..632427d7d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. 
+type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go new file mode 100644 index 000000000..65eb414fc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go @@ -0,0 +1,67 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission is an autogenerated conversion function. 
+func Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..99fc6a6fa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. +func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go new file mode 100644 index 000000000..cce2e603a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go new file mode 100644 index 000000000..703f467f9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go new file mode 100644 index 000000000..56489f780 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go new file mode 100644 index 000000000..a49a6a813 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..eadb147c4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,67 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission is an autogenerated conversion function. +func Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a59d62d6c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. 
+func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..dd621a3ac --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go new file mode 100644 index 000000000..90b7e0ae6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go @@ -0,0 +1,50 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package webhookadmission + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. 
+func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go new file mode 100644 index 000000000..78f5312a4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "io" + "io/ioutil" + "path" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme) +) + +func init() { + utilruntime.Must(webhookadmission.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) +} + +// LoadConfig extract the KubeConfigFile from configFile +func LoadConfig(configFile io.Reader) (string, error) { + var kubeconfigFile string + if configFile != nil { + // we have a config so parse it. + data, err := ioutil.ReadAll(configFile) + if err != nil { + return "", err + } + decoder := codecs.UniversalDecoder() + decodedObj, err := runtime.Decode(decoder, data) + if err != nil { + return "", err + } + config, ok := decodedObj.(*webhookadmission.WebhookAdmission) + if !ok { + return "", fmt.Errorf("unexpected type: %T", decodedObj) + } + + if !path.IsAbs(config.KubeConfigFile) { + return "", field.Invalid(field.NewPath("kubeConfigFile"), config.KubeConfigFile, "must be an absolute file path") + } + + kubeconfigFile = config.KubeConfigFile + } + return kubeconfigFile, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go new file mode 100644 index 000000000..6e86a1b5f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors contains utilities for admission webhook specific errors +package errors // import "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go new file mode 100644 index 000000000..00bbf54d2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ToStatusErr returns a StatusError with information about the webhook plugin +func ToStatusErr(webhookName string, result *metav1.Status) *apierrors.StatusError { + deniedBy := fmt.Sprintf("admission webhook %q denied the request", webhookName) + const noExp = "without explanation" + + if result == nil { + result = &metav1.Status{Status: metav1.StatusFailure} + } + + // Make sure we don't return < 400 status codes along with a rejection + if result.Code < http.StatusBadRequest { + result.Code = http.StatusBadRequest + } + // Make sure we don't return "" or "Success" status along with a rejection + if result.Status == "" || result.Status == metav1.StatusSuccess { + result.Status = metav1.StatusFailure + } + + switch { + case len(result.Message) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Message) + case len(result.Reason) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Reason) + default: + result.Message = fmt.Sprintf("%s %s", deniedBy, noExp) + } + + return &apierrors.StatusError{ + ErrStatus: *result, + } +} + +// NewDryRunUnsupportedErr returns a StatusError with information about the webhook plugin +func NewDryRunUnsupportedErr(webhookName string) *apierrors.StatusError { + reason := fmt.Sprintf("admission webhook %q does not support dry run", webhookName) + return apierrors.NewBadRequest(reason) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go new file mode 100644 index 000000000..f0e0ed79c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go @@ -0,0 +1,112 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// ConvertToGVK converts object to the desired gvk. +func ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) (runtime.Object, error) { + // Unlike other resources, custom resources do not have internal version, so + // if obj is a custom resource, it should not need conversion. + if obj.GetObjectKind().GroupVersionKind() == gvk { + return obj, nil + } + out, err := o.GetObjectCreater().New(gvk) + if err != nil { + return nil, err + } + err = o.GetObjectConvertor().Convert(obj, out, nil) + if err != nil { + return nil, err + } + // Explicitly set the GVK + out.GetObjectKind().SetGroupVersionKind(gvk) + return out, nil +} + +// NewVersionedAttributes returns versioned attributes with the old and new object (if non-nil) converted to the requested kind +func NewVersionedAttributes(attr admission.Attributes, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) (*VersionedAttributes, error) { + // convert the old and new objects to the requested version + versionedAttr := &VersionedAttributes{ + Attributes: attr, + VersionedKind: gvk, + } + if oldObj := attr.GetOldObject(); oldObj != nil { + out, err := ConvertToGVK(oldObj, gvk, o) + if err != nil { + return nil, err + } + versionedAttr.VersionedOldObject = out + } + if obj := attr.GetObject(); obj != nil { + out, err := ConvertToGVK(obj, gvk, o) + if err != nil { + return nil, err + } + versionedAttr.VersionedObject = out + } + return versionedAttr, nil +} + +// ConvertVersionedAttributes converts VersionedObject and VersionedOldObject to the specified kind, if needed. +// If attr.VersionedKind already matches the requested kind, no conversion is performed. 
+// If conversion is required: +// * attr.VersionedObject is used as the source for the new object if Dirty=true (and is round-tripped through attr.Attributes.Object, clearing Dirty in the process) +// * attr.Attributes.Object is used as the source for the new object if Dirty=false +// * attr.Attributes.OldObject is used as the source for the old object +func ConvertVersionedAttributes(attr *VersionedAttributes, gvk schema.GroupVersionKind, o admission.ObjectInterfaces) error { + // we already have the desired kind, we're done + if attr.VersionedKind == gvk { + return nil + } + + // convert the original old object to the desired GVK + if oldObj := attr.Attributes.GetOldObject(); oldObj != nil { + out, err := ConvertToGVK(oldObj, gvk, o) + if err != nil { + return err + } + attr.VersionedOldObject = out + } + + if attr.VersionedObject != nil { + // convert the existing versioned object to internal + if attr.Dirty { + err := o.GetObjectConvertor().Convert(attr.VersionedObject, attr.Attributes.GetObject(), nil) + if err != nil { + return err + } + } + + // and back to external + out, err := ConvertToGVK(attr.Attributes.GetObject(), gvk, o) + if err != nil { + return err + } + attr.VersionedObject = out + } + + // Remember we converted to this version + attr.VersionedKind = gvk + attr.Dirty = false + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go new file mode 100644 index 000000000..4381691ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/webhook" +) + +// Source can list dynamic webhook plugins. +type Source interface { + Webhooks() []webhook.WebhookAccessor + HasSynced() bool +} + +// VersionedAttributes is a wrapper around the original admission attributes, adding versioned +// variants of the object and old object. +type VersionedAttributes struct { + // Attributes holds the original admission attributes + admission.Attributes + // VersionedOldObject holds Attributes.OldObject (if non-nil), converted to VersionedKind. + // It must never be mutated. + VersionedOldObject runtime.Object + // VersionedObject holds Attributes.Object (if non-nil), converted to VersionedKind. + // If mutated, Dirty must be set to true by the mutator. 
+ VersionedObject runtime.Object + // VersionedKind holds the fully qualified kind + VersionedKind schema.GroupVersionKind + // Dirty indicates VersionedObject has been modified since being converted from Attributes.Object + Dirty bool +} + +// GetObject overrides the Attributes.GetObject() +func (v *VersionedAttributes) GetObject() runtime.Object { + if v.VersionedObject != nil { + return v.VersionedObject + } + return v.Attributes.GetObject() +} + +// WebhookInvocation describes how to call a webhook, including the resource and subresource the webhook registered for, +// and the kind that should be sent to the webhook. +type WebhookInvocation struct { + Webhook webhook.WebhookAccessor + Resource schema.GroupVersionResource + Subresource string + Kind schema.GroupVersionKind +} + +// Dispatcher dispatches webhook call to a list of webhooks with admission attributes as argument. +type Dispatcher interface { + // Dispatch a request to the webhooks. Dispatcher may choose not to + // call a hook, either because the rules of the hook does not match, or + // the namespaceSelector or the objectSelector of the hook does not + // match. A non-nil error means the request is rejected. + Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go new file mode 100644 index 000000000..c04225e94 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -0,0 +1,230 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "context" + "fmt" + "io" + + admissionv1 "k8s.io/api/admission/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config" + "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" + "k8s.io/apiserver/pkg/admission/plugin/webhook/object" + "k8s.io/apiserver/pkg/admission/plugin/webhook/rules" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" +) + +// Webhook is an abstract admission plugin with all the infrastructure to define Admit or Validate on-top. 
+type Webhook struct {
+	*admission.Handler
+
+	sourceFactory sourceFactory
+
+	hookSource       Source
+	clientManager    *webhookutil.ClientManager
+	namespaceMatcher *namespace.Matcher
+	objectMatcher    *object.Matcher
+	dispatcher       Dispatcher
+}
+
+var (
+	_ genericadmissioninit.WantsExternalKubeClientSet = &Webhook{}
+	_ admission.Interface                             = &Webhook{}
+)
+
+type sourceFactory func(f informers.SharedInformerFactory) Source
+type dispatcherFactory func(cm *webhookutil.ClientManager) Dispatcher
+
+// NewWebhook creates a new generic admission webhook.
+func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory sourceFactory, dispatcherFactory dispatcherFactory) (*Webhook, error) {
+	kubeconfigFile, err := config.LoadConfig(configFile)
+	if err != nil {
+		return nil, err
+	}
+
+	cm, err := webhookutil.NewClientManager(
+		[]schema.GroupVersion{
+			admissionv1beta1.SchemeGroupVersion,
+			admissionv1.SchemeGroupVersion,
+		},
+		admissionv1beta1.AddToScheme,
+		admissionv1.AddToScheme,
+	)
+	if err != nil {
+		return nil, err
+	}
+	authInfoResolver, err := webhookutil.NewDefaultAuthenticationInfoResolver(kubeconfigFile)
+	if err != nil {
+		return nil, err
+	}
+	// Set defaults which may be overridden later.
+	cm.SetAuthenticationInfoResolver(authInfoResolver)
+	cm.SetServiceResolver(webhookutil.NewDefaultServiceResolver())
+
+	return &Webhook{
+		Handler:          handler,
+		sourceFactory:    sourceFactory,
+		clientManager:    &cm,
+		namespaceMatcher: &namespace.Matcher{},
+		objectMatcher:    &object.Matcher{},
+		dispatcher:       dispatcherFactory(&cm),
+	}, nil
+}
+
+// SetAuthenticationInfoResolverWrapper sets the
+// AuthenticationInfoResolverWrapper.
+// TODO find a better way to wire this, but keep this pull small for now.
+func (a *Webhook) SetAuthenticationInfoResolverWrapper(wrapper webhookutil.AuthenticationInfoResolverWrapper) {
+	a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper)
+}
+
+// SetServiceResolver sets a service resolver for the webhook admission plugin.
+// Passing a nil resolver has no effect; instead a default one will be used.
+func (a *Webhook) SetServiceResolver(sr webhookutil.ServiceResolver) {
+	a.clientManager.SetServiceResolver(sr)
+}
+
+// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface.
+// It sets the external ClientSet for admission plugins that need it.
+func (a *Webhook) SetExternalKubeClientSet(client clientset.Interface) {
+	a.namespaceMatcher.Client = client
+}
+
+// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface.
+func (a *Webhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
+	namespaceInformer := f.Core().V1().Namespaces()
+	a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister()
+	a.hookSource = a.sourceFactory(f)
+	a.SetReadyFunc(func() bool {
+		return namespaceInformer.Informer().HasSynced() && a.hookSource.HasSynced()
+	})
+}
+
+// ValidateInitialization implements the InitializationValidator interface.
+func (a *Webhook) ValidateInitialization() error { + if a.hookSource == nil { + return fmt.Errorf("kubernetes client is not properly setup") + } + if err := a.namespaceMatcher.Validate(); err != nil { + return fmt.Errorf("namespaceMatcher is not properly setup: %v", err) + } + if err := a.clientManager.Validate(); err != nil { + return fmt.Errorf("clientManager is not properly setup: %v", err) + } + return nil +} + +// ShouldCallHook returns invocation details if the webhook should be called, nil if the webhook should not be called, +// or an error if an error was encountered during evaluation. +func (a *Webhook) ShouldCallHook(h webhook.WebhookAccessor, attr admission.Attributes, o admission.ObjectInterfaces) (*WebhookInvocation, *apierrors.StatusError) { + matches, matchNsErr := a.namespaceMatcher.MatchNamespaceSelector(h, attr) + // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. + if !matches && matchNsErr == nil { + return nil, nil + } + + // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. + matches, matchObjErr := a.objectMatcher.MatchObjectSelector(h, attr) + if !matches && matchObjErr == nil { + return nil, nil + } + + var invocation *WebhookInvocation + for _, r := range h.GetRules() { + m := rules.Matcher{Rule: r, Attr: attr} + if m.Matches() { + invocation = &WebhookInvocation{ + Webhook: h, + Resource: attr.GetResource(), + Subresource: attr.GetSubresource(), + Kind: attr.GetKind(), + } + break + } + } + if invocation == nil && h.GetMatchPolicy() != nil && *h.GetMatchPolicy() == v1.Equivalent { + attrWithOverride := &attrWithResourceOverride{Attributes: attr} + equivalents := o.GetEquivalentResourceMapper().EquivalentResourcesFor(attr.GetResource(), attr.GetSubresource()) + // honor earlier rules first + OuterLoop: + for _, r := range h.GetRules() { + // see if the rule matches any of the equivalent resources + for _, equivalent := range equivalents { + if equivalent == attr.GetResource() { + // exclude attr.GetResource(), which we already checked + continue + } + attrWithOverride.resource = equivalent + m := rules.Matcher{Rule: r, Attr: attrWithOverride} + if m.Matches() { + kind := o.GetEquivalentResourceMapper().KindFor(equivalent, attr.GetSubresource()) + if kind.Empty() { + return nil, apierrors.NewInternalError(fmt.Errorf("unable to convert to %v: unknown kind", equivalent)) + } + invocation = &WebhookInvocation{ + Webhook: h, + Resource: equivalent, + Subresource: attr.GetSubresource(), + Kind: kind, + } + break OuterLoop + } + } + } + } + + if invocation == nil { + return nil, nil + } + if matchNsErr != nil { + return nil, matchNsErr + } + if matchObjErr != nil { + return nil, matchObjErr + } + + return invocation, nil +} + +type attrWithResourceOverride struct { + admission.Attributes + resource schema.GroupVersionResource +} + +func (a *attrWithResourceOverride) GetResource() schema.GroupVersionResource { return a.resource } + +// Dispatch is called by the downstream Validate or Admit methods. 
+func (a *Webhook) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error {
+	if rules.IsWebhookConfigurationResource(attr) {
+		return nil
+	}
+	if !a.WaitForReady() {
+		return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request"))
+	}
+	hooks := a.hookSource.Webhooks()
+	return a.dispatcher.Dispatch(ctx, attr, o, hooks)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go
new file mode 100644
index 000000000..4cf9f3711
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go
@@ -0,0 +1,430 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mutating delegates admission checks to dynamically configured
+// mutating webhooks.
+package mutating
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	jsonpatch "github.com/evanphx/json-patch"
+
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/klog/v2"
+
+	admissionv1 "k8s.io/api/admission/v1"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/admission"
+	admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
+	"k8s.io/apiserver/pkg/admission/plugin/webhook"
+	webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors"
+	"k8s.io/apiserver/pkg/admission/plugin/webhook/generic"
+	webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	webhookutil "k8s.io/apiserver/pkg/util/webhook"
+	"k8s.io/apiserver/pkg/warning"
+	utiltrace "k8s.io/utils/trace"
+)
+
+const (
+	// PatchAuditAnnotationPrefix is a prefix for persisting webhook patch in audit annotation.
+	// Audit handler decides whether annotation with this prefix should be logged based on audit level.
+	// Since mutating webhook patches the request body, audit level must be greater than or equal to Request
+	// for the annotation to be logged.
+	PatchAuditAnnotationPrefix = "patch.webhook.admission.k8s.io/"
+	// MutationAuditAnnotationPrefix is a prefix for persisting webhook mutation existence in audit annotation.
+ MutationAuditAnnotationPrefix = "mutation.webhook.admission.k8s.io/" +) + +var encodingjson = json.CaseSensitiveJSONIterator() + +type mutatingDispatcher struct { + cm *webhookutil.ClientManager + plugin *Plugin +} + +func newMutatingDispatcher(p *Plugin) func(cm *webhookutil.ClientManager) generic.Dispatcher { + return func(cm *webhookutil.ClientManager) generic.Dispatcher { + return &mutatingDispatcher{cm, p} + } +} + +var _ generic.Dispatcher = &mutatingDispatcher{} + +func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error { + reinvokeCtx := attr.GetReinvocationContext() + var webhookReinvokeCtx *webhookReinvokeContext + if v := reinvokeCtx.Value(PluginName); v != nil { + webhookReinvokeCtx = v.(*webhookReinvokeContext) + } else { + webhookReinvokeCtx = &webhookReinvokeContext{} + reinvokeCtx.SetValue(PluginName, webhookReinvokeCtx) + } + + if reinvokeCtx.IsReinvoke() && webhookReinvokeCtx.IsOutputChangedSinceLastWebhookInvocation(attr.GetObject()) { + // If the object has changed, we know the in-tree plugin re-invocations have mutated the object, + // and we need to reinvoke all eligible webhooks. + webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + } + defer func() { + webhookReinvokeCtx.SetLastWebhookInvocationOutput(attr.GetObject()) + }() + var versionedAttr *generic.VersionedAttributes + for i, hook := range hooks { + attrForCheck := attr + if versionedAttr != nil { + attrForCheck = versionedAttr + } + invocation, statusErr := a.plugin.ShouldCallHook(hook, attrForCheck, o) + if statusErr != nil { + return statusErr + } + if invocation == nil { + continue + } + hook, ok := invocation.Webhook.GetMutatingWebhook() + if !ok { + return fmt.Errorf("mutating webhook dispatch requires v1.MutatingWebhook, but got %T", hook) + } + // This means that during reinvocation, a webhook will not be + // called for the first time. For example, if the webhook is + // skipped in the first round because of mismatching labels, + // even if the labels become matching, the webhook does not + // get called during reinvocation. 
+ if reinvokeCtx.IsReinvoke() && !webhookReinvokeCtx.ShouldReinvokeWebhook(invocation.Webhook.GetUID()) { + continue + } + + if versionedAttr == nil { + // First webhook, create versioned attributes + var err error + if versionedAttr, err = generic.NewVersionedAttributes(attr, invocation.Kind, o); err != nil { + return apierrors.NewInternalError(err) + } + } else { + // Subsequent webhook, convert existing versioned attributes to this webhook's version + if err := generic.ConvertVersionedAttributes(versionedAttr, invocation.Kind, o); err != nil { + return apierrors.NewInternalError(err) + } + } + + t := time.Now() + round := 0 + if reinvokeCtx.IsReinvoke() { + round = 1 + } + changed, err := a.callAttrMutatingHook(ctx, hook, invocation, versionedAttr, o, round, i) + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == admissionregistrationv1.Ignore + rejected := false + if err != nil { + switch err := err.(type) { + case *webhookutil.ErrCallingWebhook: + if !ignoreClientCallFailures { + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + } + case *webhookutil.ErrWebhookRejection: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + default: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + } + } + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), rejected, versionedAttr.Attributes, "admit", hook.Name) + if changed { + // Patch had changed the object. Prepare to reinvoke all previous webhooks that are eligible for re-invocation. 
+ webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() + reinvokeCtx.SetShouldReinvoke() + } + if hook.ReinvocationPolicy != nil && *hook.ReinvocationPolicy == admissionregistrationv1.IfNeededReinvocationPolicy { + webhookReinvokeCtx.AddReinvocableWebhookToPreviouslyInvoked(invocation.Webhook.GetUID()) + } + if err == nil { + continue + } + + if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + + select { + case <-ctx.Done(): + // parent context is canceled or timed out, no point in continuing + return apierrors.NewTimeoutError("request did not complete within requested timeout", 0) + default: + // individual webhook timed out, but parent context did not, continue + continue + } + } + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + return apierrors.NewInternalError(err) + } + if rejectionErr, ok := err.(*webhookutil.ErrWebhookRejection); ok { + return rejectionErr.Status + } + return err + } + + // convert versionedAttr.VersionedObject to the internal version in the underlying admission.Attributes + if versionedAttr != nil && versionedAttr.VersionedObject != nil && versionedAttr.Dirty { + return o.GetObjectConvertor().Convert(versionedAttr.VersionedObject, versionedAttr.Attributes.GetObject(), nil) + } + + return nil +} + +// note that callAttrMutatingHook updates attr + +func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admissionregistrationv1.MutatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes, o admission.ObjectInterfaces, round, idx int) (bool, error) { + configurationName := invocation.Webhook.GetConfigurationName() + annotator := newWebhookAnnotator(attr, round, idx, h.Name, configurationName) + changed := false + defer func() { annotator.addMutationAnnotation(changed) }() + if attr.Attributes.IsDryRun() { + if h.SideEffects == nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + } + if !(*h.SideEffects == admissionregistrationv1.SideEffectClassNone || *h.SideEffects == admissionregistrationv1.SideEffectClassNoneOnDryRun) { + return false, webhookerrors.NewDryRunUnsupportedErr(h.Name) + } + } + + uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + // Make the webhook request + client, err := invocation.Webhook.GetRESTClient(a.cm) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace := utiltrace.New("Call mutating webhook", + utiltrace.Field{"configuration", configurationName}, + utiltrace.Field{"webhook", h.Name}, + utiltrace.Field{"resource", attr.GetResource()}, + utiltrace.Field{"subresource", attr.GetSubresource()}, + utiltrace.Field{"operation", attr.GetOperation()}, + utiltrace.Field{"UID", uid}) + defer trace.LogIfLong(500 * time.Millisecond) + + // if the webhook has a specific timeout, wrap the context to apply it + if h.TimeoutSeconds != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(*h.TimeoutSeconds)*time.Second) + defer cancel() + } + + r := client.Post().Body(request) + + // if the context has a deadline, set it as a parameter to inform the backend + if deadline, hasDeadline := ctx.Deadline(); 
hasDeadline { + // compute the timeout + if timeout := time.Until(deadline); timeout > 0 { + // if it's not an even number of seconds, round up to the nearest second + if truncated := timeout.Truncate(time.Second); truncated != timeout { + timeout = truncated + time.Second + } + // set the timeout + r.Timeout(timeout) + } + } + + if err := r.Do(ctx).Into(response); err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace.Step("Request completed") + + result, err := webhookrequest.VerifyAdmissionResponse(uid, true, response) + if err != nil { + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + for k, v := range result.AuditAnnotations { + key := h.Name + "/" + k + if err := attr.Attributes.AddAnnotation(key, v); err != nil { + klog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) + } + } + for _, w := range result.Warnings { + warning.AddWarning(ctx, "", w) + } + + if !result.Allowed { + return false, &webhookutil.ErrWebhookRejection{Status: webhookerrors.ToStatusErr(h.Name, result.Result)} + } + + if len(result.Patch) == 0 { + return false, nil + } + patchObj, err := jsonpatch.DecodePatch(result.Patch) + if err != nil { + return false, apierrors.NewInternalError(err) + } + + if len(patchObj) == 0 { + return false, nil + } + + // if a non-empty patch was provided, and we have no object we can apply it to (e.g. a DELETE admission operation), error + if attr.VersionedObject == nil { + return false, apierrors.NewInternalError(fmt.Errorf("admission webhook %q attempted to modify the object, which is not supported for this operation", h.Name)) + } + + var patchedJS []byte + jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, o.GetObjectCreater(), o.GetObjectTyper(), false) + switch result.PatchType { + // VerifyAdmissionResponse normalizes to v1 patch types, regardless of the AdmissionReview version used + case admissionv1.PatchTypeJSONPatch: + objJS, err := runtime.Encode(jsonSerializer, attr.VersionedObject) + if err != nil { + return false, apierrors.NewInternalError(err) + } + patchedJS, err = patchObj.Apply(objJS) + if err != nil { + return false, apierrors.NewInternalError(err) + } + default: + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("unsupported patch type %q", result.PatchType)} + } + + var newVersionedObject runtime.Object + if _, ok := attr.VersionedObject.(*unstructured.Unstructured); ok { + // Custom Resources don't have corresponding Go struct's. + // They are represented as Unstructured. + newVersionedObject = &unstructured.Unstructured{} + } else { + newVersionedObject, err = o.GetObjectCreater().New(attr.VersionedKind) + if err != nil { + return false, apierrors.NewInternalError(err) + } + } + + // TODO: if we have multiple mutating webhooks, we can remember the json + // instead of encoding and decoding for each one. 
+ if newVersionedObject, _, err = jsonSerializer.Decode(patchedJS, nil, newVersionedObject); err != nil { + return false, apierrors.NewInternalError(err) + } + + changed = !apiequality.Semantic.DeepEqual(attr.VersionedObject, newVersionedObject) + trace.Step("Patch applied") + annotator.addPatchAnnotation(patchObj, result.PatchType) + attr.Dirty = true + attr.VersionedObject = newVersionedObject + o.GetObjectDefaulter().Default(attr.VersionedObject) + return changed, nil +} + +type webhookAnnotator struct { + attr *generic.VersionedAttributes + patchAnnotationKey string + mutationAnnotationKey string + webhook string + configuration string +} + +func newWebhookAnnotator(attr *generic.VersionedAttributes, round, idx int, webhook, configuration string) *webhookAnnotator { + return &webhookAnnotator{ + attr: attr, + patchAnnotationKey: fmt.Sprintf("%sround_%d_index_%d", PatchAuditAnnotationPrefix, round, idx), + mutationAnnotationKey: fmt.Sprintf("%sround_%d_index_%d", MutationAuditAnnotationPrefix, round, idx), + webhook: webhook, + configuration: configuration, + } +} + +func (w *webhookAnnotator) addMutationAnnotation(mutated bool) { + if w.attr == nil || w.attr.Attributes == nil { + return + } + value, err := mutationAnnotationValue(w.configuration, w.webhook, mutated) + if err != nil { + klog.Warningf("unexpected error composing mutating webhook annotation: %v", err) + return + } + if err := w.attr.Attributes.AddAnnotation(w.mutationAnnotationKey, value); err != nil { + klog.Warningf("failed to set mutation annotation for mutating webhook key %s to %s: %v", w.mutationAnnotationKey, value, err) + } +} + +func (w *webhookAnnotator) addPatchAnnotation(patch interface{}, patchType admissionv1.PatchType) { + if w.attr == nil || w.attr.Attributes == nil { + return + } + var value string + var err error + switch patchType { + case admissionv1.PatchTypeJSONPatch: + value, err = jsonPatchAnnotationValue(w.configuration, w.webhook, patch) + if err != nil { + klog.Warningf("unexpected error composing mutating webhook JSON patch annotation: %v", err) + return + } + default: + klog.Warningf("unsupported patch type for mutating webhook annotation: %v", patchType) + return + } + if err := w.attr.Attributes.AddAnnotationWithLevel(w.patchAnnotationKey, value, auditinternal.LevelRequest); err != nil { + // NOTE: we don't log actual patch in kube-apiserver log to avoid potentially + // leaking information + klog.Warningf("failed to set patch annotation for mutating webhook key %s; confugiration name: %s, webhook name: %s", w.patchAnnotationKey, w.configuration, w.webhook) + } +} + +// MutationAuditAnnotation logs if a webhook invocation mutated the request object +type MutationAuditAnnotation struct { + Configuration string `json:"configuration"` + Webhook string `json:"webhook"` + Mutated bool `json:"mutated"` +} + +// PatchAuditAnnotation logs a patch from a mutating webhook +type PatchAuditAnnotation struct { + Configuration string `json:"configuration"` + Webhook string `json:"webhook"` + Patch interface{} `json:"patch,omitempty"` + PatchType string `json:"patchType,omitempty"` +} + +func mutationAnnotationValue(configuration, webhook string, mutated bool) (string, error) { + m := MutationAuditAnnotation{ + Configuration: configuration, + Webhook: webhook, + Mutated: mutated, + } + bytes, err := encodingjson.Marshal(m) + return string(bytes), err +} + +func jsonPatchAnnotationValue(configuration, webhook string, patch interface{}) (string, error) { + p := PatchAuditAnnotation{ + Configuration: 
configuration, + Webhook: webhook, + Patch: patch, + PatchType: string(admissionv1.PatchTypeJSONPatch), + } + bytes, err := encodingjson.Marshal(p) + return string(bytes), err +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go new file mode 100644 index 000000000..d804aca1c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mutating makes calls to mutating webhooks during the admission +// process. +package mutating // import "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go new file mode 100644 index 000000000..fb07a61c1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "context" + "io" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "MutatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewMutatingWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) +} + +// Plugin is an implementation of admission.Interface. +type Plugin struct { + *generic.Webhook +} + +var _ admission.MutationInterface = &Plugin{} + +// NewMutatingWebhook returns a generic admission webhook plugin. +func NewMutatingWebhook(configFile io.Reader) (*Plugin, error) { + handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) + p := &Plugin{} + var err error + p.Webhook, err = generic.NewWebhook(handler, configFile, configuration.NewMutatingWebhookConfigurationManager, newMutatingDispatcher(p)) + if err != nil { + return nil, err + } + + return p, nil +} + +// ValidateInitialization implements the InitializationValidator interface. 
+func (a *Plugin) ValidateInitialization() error { + if err := a.Webhook.ValidateInitialization(); err != nil { + return err + } + return nil +} + +// Admit makes an admission decision based on the request attributes. +func (a *Plugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.Webhook.Dispatch(ctx, attr, o) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go new file mode 100644 index 000000000..de0af221e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/reinvocationcontext.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +type webhookReinvokeContext struct { + // lastWebhookOutput holds the result of the last webhook admission plugin call + lastWebhookOutput runtime.Object + // previouslyInvokedReinvocableWebhooks holds the set of webhooks that have been invoked and + // should be reinvoked if a later mutation occurs + previouslyInvokedReinvocableWebhooks sets.String + // reinvokeWebhooks holds the set of webhooks that should be reinvoked + reinvokeWebhooks sets.String +} + +func (rc *webhookReinvokeContext) ShouldReinvokeWebhook(webhook string) bool { + return rc.reinvokeWebhooks.Has(webhook) +} + +func (rc *webhookReinvokeContext) IsOutputChangedSinceLastWebhookInvocation(object runtime.Object) bool { + return !apiequality.Semantic.DeepEqual(rc.lastWebhookOutput, object) +} + +func (rc *webhookReinvokeContext) SetLastWebhookInvocationOutput(object runtime.Object) { + if object == nil { + rc.lastWebhookOutput = nil + return + } + rc.lastWebhookOutput = object.DeepCopyObject() +} + +func (rc *webhookReinvokeContext) AddReinvocableWebhookToPreviouslyInvoked(webhook string) { + if rc.previouslyInvokedReinvocableWebhooks == nil { + rc.previouslyInvokedReinvocableWebhooks = sets.NewString() + } + rc.previouslyInvokedReinvocableWebhooks.Insert(webhook) +} + +func (rc *webhookReinvokeContext) RequireReinvokingPreviouslyInvokedPlugins() { + if len(rc.previouslyInvokedReinvocableWebhooks) > 0 { + if rc.reinvokeWebhooks == nil { + rc.reinvokeWebhooks = sets.NewString() + } + for s := range rc.previouslyInvokedReinvocableWebhooks { + rc.reinvokeWebhooks.Insert(s) + } + rc.previouslyInvokedReinvocableWebhooks = sets.NewString() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go new file mode 100644 index 000000000..d1a285338 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package namespace defines the utilities that are used by the webhook
+// plugin to decide if a webhook should be applied to an object based on its
+// namespace.
+package namespace // import "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace"
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go
new file mode 100644
index 000000000..183be7b39
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package namespace
+
+import (
+	"context"
+	"fmt"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/admission/plugin/webhook"
+	clientset "k8s.io/client-go/kubernetes"
+	corelisters "k8s.io/client-go/listers/core/v1"
+)
+
+// Matcher decides if a request is exempted by the NamespaceSelector of a
+// webhook configuration.
+type Matcher struct {
+	NamespaceLister corelisters.NamespaceLister
+	Client          clientset.Interface
+}
+
+// Validate checks if the Matcher has a NamespaceLister and Client.
+func (m *Matcher) Validate() error {
+	var errs []error
+	if m.NamespaceLister == nil {
+		errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister"))
+	}
+	if m.Client == nil {
+		errs = append(errs, fmt.Errorf("the namespace matcher requires a client"))
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// GetNamespaceLabels gets the labels of the namespace related to the attr.
+func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]string, error) {
+	// If the request itself is creating or updating a namespace, then get the
+	// labels from attr.Object, because namespaceLister doesn't have the latest
+	// namespace yet.
+	//
+	// However, if the request is deleting a namespace, then get the labels from
+	// the namespace in the namespaceLister, because a delete request is not
+	// going to change the object, and attr.Object will be a DeleteOptions
+	// rather than a namespace object.
+	if attr.GetResource().Resource == "namespaces" &&
+		len(attr.GetSubresource()) == 0 &&
+		(attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) {
+		accessor, err := meta.Accessor(attr.GetObject())
+		if err != nil {
+			return nil, err
+		}
+		return accessor.GetLabels(), nil
+	}
+
+	namespaceName := attr.GetNamespace()
+	namespace, err := m.NamespaceLister.Get(namespaceName)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, err
+	}
+	if apierrors.IsNotFound(err) {
+		// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
+		namespace, err = m.Client.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return namespace.Labels, nil
+}
+
+// MatchNamespaceSelector decides whether the request matches the
+// namespaceSelector of the webhook. Only when they match, the webhook is called.
+func (m *Matcher) MatchNamespaceSelector(h webhook.WebhookAccessor, attr admission.Attributes) (bool, *apierrors.StatusError) {
+	namespaceName := attr.GetNamespace()
+	if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" {
+		// If the request is about a cluster scoped resource, and it is not a
+		// namespace, it is never exempted.
+		// TODO: figure out a way to selectively exempt cluster scoped resources.
+		// Also update the comment in types.go
+		return true, nil
+	}
+	selector, err := h.GetParsedNamespaceSelector()
+	if err != nil {
+		return false, apierrors.NewInternalError(err)
+	}
+	if selector.Empty() {
+		return true, nil
+	}
+
+	namespaceLabels, err := m.GetNamespaceLabels(attr)
+	// this means the namespace is not found, for backwards compatibility,
+	// return a 404
+	if apierrors.IsNotFound(err) {
+		status, ok := err.(apierrors.APIStatus)
+		if !ok {
+			return false, apierrors.NewInternalError(err)
+		}
+		return false, &apierrors.StatusError{ErrStatus: status.Status()}
+	}
+	if err != nil {
+		return false, apierrors.NewInternalError(err)
+	}
+	return selector.Matches(labels.Set(namespaceLabels)), nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go
new file mode 100644
index 000000000..93c473440
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package object defines the utilities that are used by the webhook plugin to
+// decide if a webhook should run, as long as either the old object or the new
+// object has labels matching the webhook config's objectSelector.
+package object // import "k8s.io/apiserver/pkg/admission/plugin/webhook/object"
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go
new file mode 100644
index 000000000..773e3e6ee
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/object/matcher.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package object
+
+import (
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/admission/plugin/webhook"
+	"k8s.io/klog/v2"
+)
+
+// Matcher decides whether a request is selected by the ObjectSelector.
+type Matcher struct {
+}
+
+func matchObject(obj runtime.Object, selector labels.Selector) bool {
+	if obj == nil {
+		return false
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		klog.V(5).Infof("cannot access metadata of %v: %v", obj, err)
+		return false
+	}
+	return selector.Matches(labels.Set(accessor.GetLabels()))
+
+}
+
+// MatchObjectSelector decides whether the request matches the ObjectSelector
+// of the webhook. Only when they match, the webhook is called.
+func (m *Matcher) MatchObjectSelector(h webhook.WebhookAccessor, attr admission.Attributes) (bool, *apierrors.StatusError) {
+	selector, err := h.GetParsedObjectSelector()
+	if err != nil {
+		return false, apierrors.NewInternalError(err)
+	}
+	if selector.Empty() {
+		return true, nil
+	}
+	return matchObject(attr.GetObject(), selector) || matchObject(attr.GetOldObject(), selector), nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go
new file mode 100644
index 000000000..c60d0fb9e
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package request + +import ( + "fmt" + + admissionv1 "k8s.io/api/admission/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +// AdmissionResponse contains the fields extracted from an AdmissionReview response +type AdmissionResponse struct { + AuditAnnotations map[string]string + Allowed bool + Patch []byte + PatchType admissionv1.PatchType + Result *metav1.Status + Warnings []string +} + +// VerifyAdmissionResponse checks the validity of the provided admission review object, and returns the +// audit annotations, whether the response allowed the request, any provided patch/patchType/status, +// or an error if the provided admission review was not valid. +func VerifyAdmissionResponse(uid types.UID, mutating bool, review runtime.Object) (*AdmissionResponse, error) { + switch r := review.(type) { + case *admissionv1.AdmissionReview: + if r.Response == nil { + return nil, fmt.Errorf("webhook response was absent") + } + + // Verify UID matches + if r.Response.UID != uid { + return nil, fmt.Errorf("expected response.uid=%q, got %q", uid, r.Response.UID) + } + + // Verify GVK + v1GVK := admissionv1.SchemeGroupVersion.WithKind("AdmissionReview") + if r.GroupVersionKind() != v1GVK { + return nil, fmt.Errorf("expected webhook response of %v, got %v", v1GVK.String(), r.GroupVersionKind().String()) + } + + patch := []byte(nil) + patchType := admissionv1.PatchType("") + + if mutating { + // Ensure a mutating webhook provides both patch and patchType together + if len(r.Response.Patch) > 0 && r.Response.PatchType == nil { + return nil, fmt.Errorf("webhook returned response.patch but not response.patchType") + } + if len(r.Response.Patch) == 0 && r.Response.PatchType != nil { + return nil, fmt.Errorf("webhook returned response.patchType but not response.patch") + } + patch = r.Response.Patch + if r.Response.PatchType != nil { + patchType = *r.Response.PatchType + if len(patchType) == 0 { + return nil, fmt.Errorf("webhook returned invalid response.patchType of %q", patchType) + } + } + } else { + // Ensure a validating webhook doesn't return patch or patchType + if len(r.Response.Patch) > 0 { + return nil, fmt.Errorf("validating webhook may not return response.patch") + } + if r.Response.PatchType != nil { + return nil, fmt.Errorf("validating webhook may not return response.patchType") + } + } + + return &AdmissionResponse{ + AuditAnnotations: r.Response.AuditAnnotations, + Allowed: r.Response.Allowed, + Patch: patch, + PatchType: patchType, + Result: r.Response.Result, + Warnings: r.Response.Warnings, + }, nil + + case *admissionv1beta1.AdmissionReview: + if r.Response == nil { + return nil, fmt.Errorf("webhook response was absent") + } + + // Response GVK and response.uid were not verified in v1beta1 handling, allow any + + patch := []byte(nil) + patchType := admissionv1.PatchType("") + if mutating { + patch = r.Response.Patch + if len(r.Response.Patch) > 0 { + // patch type was not verified in v1beta1 admissionreview handling. pin to only supported version if a patch is provided. 
+ patchType = admissionv1.PatchTypeJSONPatch + } + } + + return &AdmissionResponse{ + AuditAnnotations: r.Response.AuditAnnotations, + Allowed: r.Response.Allowed, + Patch: patch, + PatchType: patchType, + Result: r.Response.Result, + Warnings: r.Response.Warnings, + }, nil + + default: + return nil, fmt.Errorf("unexpected response type %T", review) + } +} + +// CreateAdmissionObjects returns the unique request uid, the AdmissionReview object to send the webhook and to decode the response into, +// or an error if the webhook does not support receiving any of the admission review versions we know to send +func CreateAdmissionObjects(versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) (uid types.UID, request, response runtime.Object, err error) { + for _, version := range invocation.Webhook.GetAdmissionReviewVersions() { + switch version { + case admissionv1.SchemeGroupVersion.Version: + uid := types.UID(uuid.NewUUID()) + request := CreateV1AdmissionReview(uid, versionedAttributes, invocation) + response := &admissionv1.AdmissionReview{} + return uid, request, response, nil + + case admissionv1beta1.SchemeGroupVersion.Version: + uid := types.UID(uuid.NewUUID()) + request := CreateV1beta1AdmissionReview(uid, versionedAttributes, invocation) + response := &admissionv1beta1.AdmissionReview{} + return uid, request, response, nil + + } + } + return "", nil, nil, fmt.Errorf("webhook does not accept known AdmissionReview versions (v1, v1beta1)") +} + +// CreateV1AdmissionReview creates an AdmissionReview for the provided admission.Attributes +func CreateV1AdmissionReview(uid types.UID, versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) *admissionv1.AdmissionReview { + attr := versionedAttributes.Attributes + gvk := invocation.Kind + gvr := invocation.Resource + subresource := invocation.Subresource + requestGVK := attr.GetKind() + requestGVR := attr.GetResource() + requestSubResource := attr.GetSubresource() + aUserInfo := attr.GetUserInfo() + userInfo := authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + dryRun := attr.IsDryRun() + + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + + return &admissionv1.AdmissionReview{ + Request: &admissionv1.AdmissionRequest{ + UID: uid, + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: subresource, + RequestKind: &metav1.GroupVersionKind{ + Group: requestGVK.Group, + Kind: requestGVK.Kind, + Version: requestGVK.Version, + }, + RequestResource: &metav1.GroupVersionResource{ + Group: requestGVR.Group, + Resource: requestGVR.Resource, + Version: requestGVR.Version, + }, + RequestSubResource: requestSubResource, + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1.Operation(attr.GetOperation()), + UserInfo: userInfo, + Object: runtime.RawExtension{ + Object: versionedAttributes.VersionedObject, + }, + OldObject: runtime.RawExtension{ + Object: versionedAttributes.VersionedOldObject, + }, + DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, + }, + } +} + +// CreateV1beta1AdmissionReview creates an 
AdmissionReview for the provided admission.Attributes +func CreateV1beta1AdmissionReview(uid types.UID, versionedAttributes *generic.VersionedAttributes, invocation *generic.WebhookInvocation) *admissionv1beta1.AdmissionReview { + attr := versionedAttributes.Attributes + gvk := invocation.Kind + gvr := invocation.Resource + subresource := invocation.Subresource + requestGVK := attr.GetKind() + requestGVR := attr.GetResource() + requestSubResource := attr.GetSubresource() + aUserInfo := attr.GetUserInfo() + userInfo := authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + dryRun := attr.IsDryRun() + + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + + return &admissionv1beta1.AdmissionReview{ + Request: &admissionv1beta1.AdmissionRequest{ + UID: uid, + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: subresource, + RequestKind: &metav1.GroupVersionKind{ + Group: requestGVK.Group, + Kind: requestGVK.Kind, + Version: requestGVK.Version, + }, + RequestResource: &metav1.GroupVersionResource{ + Group: requestGVR.Group, + Resource: requestGVR.Resource, + Version: requestGVR.Version, + }, + RequestSubResource: requestSubResource, + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1beta1.Operation(attr.GetOperation()), + UserInfo: userInfo, + Object: runtime.RawExtension{ + Object: versionedAttributes.VersionedObject, + }, + OldObject: runtime.RawExtension{ + Object: versionedAttributes.VersionedOldObject, + }, + DryRun: &dryRun, + Options: runtime.RawExtension{ + Object: attr.GetOperationOptions(), + }, + }, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go new file mode 100644 index 000000000..fbacf3371 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package request creates admissionReview request based on admission attributes. +package request // import "k8s.io/apiserver/pkg/admission/plugin/webhook/request" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go new file mode 100644 index 000000000..924e79bcc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "strings" + + "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// Matcher determines if the Attr matches the Rule. +type Matcher struct { + Rule v1.RuleWithOperations + Attr admission.Attributes +} + +// Matches returns if the Attr matches the Rule. +func (r *Matcher) Matches() bool { + return r.scope() && + r.operation() && + r.group() && + r.version() && + r.resource() +} + +func exactOrWildcard(items []string, requested string) bool { + for _, item := range items { + if item == "*" { + return true + } + if item == requested { + return true + } + } + + return false +} + +var namespaceResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} + +func (r *Matcher) scope() bool { + if r.Rule.Scope == nil || *r.Rule.Scope == v1.AllScopes { + return true + } + // attr.GetNamespace() is set to the name of the namespace for requests of the namespace object itself. + switch *r.Rule.Scope { + case v1.NamespacedScope: + // first make sure that we are not requesting a namespace object (namespace objects are cluster-scoped) + return r.Attr.GetResource() != namespaceResource && r.Attr.GetNamespace() != metav1.NamespaceNone + case v1.ClusterScope: + // also return true if the request is for a namespace object (namespace objects are cluster-scoped) + return r.Attr.GetResource() == namespaceResource || r.Attr.GetNamespace() == metav1.NamespaceNone + default: + return false + } +} + +func (r *Matcher) group() bool { + return exactOrWildcard(r.Rule.APIGroups, r.Attr.GetResource().Group) +} + +func (r *Matcher) version() bool { + return exactOrWildcard(r.Rule.APIVersions, r.Attr.GetResource().Version) +} + +func (r *Matcher) operation() bool { + attrOp := r.Attr.GetOperation() + for _, op := range r.Rule.Operations { + if op == v1.OperationAll { + return true + } + // The constants are the same such that this is a valid cast (and this + // is tested). 
+ if op == v1.OperationType(attrOp) { + return true + } + } + return false +} + +func splitResource(resSub string) (res, sub string) { + parts := strings.SplitN(resSub, "/", 2) + if len(parts) == 2 { + return parts[0], parts[1] + } + return parts[0], "" +} + +func (r *Matcher) resource() bool { + opRes, opSub := r.Attr.GetResource().Resource, r.Attr.GetSubresource() + for _, res := range r.Rule.Resources { + res, sub := splitResource(res) + resMatch := res == "*" || res == opRes + subMatch := sub == "*" || sub == opSub + if resMatch && subMatch { + return true + } + } + return false +} + +// IsWebhookConfigurationResource determines if an admission.Attributes object is describing +// the admission of a ValidatingWebhookConfiguration or a MutatingWebhookConfiguration +func IsWebhookConfigurationResource(attr admission.Attributes) bool { + gvk := attr.GetKind() + if gvk.Group == "admissionregistration.k8s.io" { + if gvk.Kind == "ValidatingWebhookConfiguration" || gvk.Kind == "MutatingWebhookConfiguration" { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go new file mode 100644 index 000000000..f065cdf50 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -0,0 +1,238 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validating + +import ( + "context" + "fmt" + "sync" + "time" + + v1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/webhook" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" + webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + webhookutil "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/apiserver/pkg/warning" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +type validatingDispatcher struct { + cm *webhookutil.ClientManager + plugin *Plugin +} + +func newValidatingDispatcher(p *Plugin) func(cm *webhookutil.ClientManager) generic.Dispatcher { + return func(cm *webhookutil.ClientManager) generic.Dispatcher { + return &validatingDispatcher{cm, p} + } +} + +var _ generic.Dispatcher = &validatingDispatcher{} + +func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error { + var relevantHooks []*generic.WebhookInvocation + // Construct all the versions we need to call our webhooks + versionedAttrs := map[schema.GroupVersionKind]*generic.VersionedAttributes{} + for _, hook := range hooks { + invocation, statusError := d.plugin.ShouldCallHook(hook, attr, o) + if statusError != nil { + return statusError + } + if invocation == nil { + continue + } + relevantHooks = append(relevantHooks, invocation) + // If we already have this version, continue + if _, ok := versionedAttrs[invocation.Kind]; ok { + continue + } + versionedAttr, err := generic.NewVersionedAttributes(attr, invocation.Kind, o) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttrs[invocation.Kind] = versionedAttr + } + + if len(relevantHooks) == 0 { + // no matching hooks + return nil + } + + // Check if the request has already timed out before spawning remote calls + select { + case <-ctx.Done(): + // parent context is canceled or timed out, no point in continuing + return apierrors.NewTimeoutError("request did not complete within requested timeout", 0) + default: + } + + wg := sync.WaitGroup{} + errCh := make(chan error, len(relevantHooks)) + wg.Add(len(relevantHooks)) + for i := range relevantHooks { + go func(invocation *generic.WebhookInvocation) { + defer wg.Done() + hook, ok := invocation.Webhook.GetValidatingWebhook() + if !ok { + utilruntime.HandleError(fmt.Errorf("validating webhook dispatch requires v1.ValidatingWebhook, but got %T", hook)) + return + } + versionedAttr := versionedAttrs[invocation.Kind] + t := time.Now() + err := d.callHook(ctx, hook, invocation, versionedAttr) + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1.Ignore + rejected := false + if err != nil { + switch err := err.(type) { + case *webhookutil.ErrCallingWebhook: + if !ignoreClientCallFailures { + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + } + case *webhookutil.ErrWebhookRejection: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), 
admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + default: + rejected = true + admissionmetrics.Metrics.ObserveWebhookRejection(hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + } + } + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), rejected, versionedAttr.Attributes, "validating", hook.Name) + if err == nil { + return + } + + if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + return + } + + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + errCh <- apierrors.NewInternalError(err) + return + } + + if rejectionErr, ok := err.(*webhookutil.ErrWebhookRejection); ok { + err = rejectionErr.Status + } + klog.Warningf("rejected by webhook %q: %#v", hook.Name, err) + errCh <- err + }(relevantHooks[i]) + } + wg.Wait() + close(errCh) + + var errs []error + for e := range errCh { + errs = append(errs, e) + } + if len(errs) == 0 { + return nil + } + if len(errs) > 1 { + for i := 1; i < len(errs); i++ { + // TODO: merge status errors; until then, just return the first one. + utilruntime.HandleError(errs[i]) + } + } + return errs[0] +} + +func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes) error { + if attr.Attributes.IsDryRun() { + if h.SideEffects == nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + } + if !(*h.SideEffects == v1.SideEffectClassNone || *h.SideEffects == v1.SideEffectClassNoneOnDryRun) { + return webhookerrors.NewDryRunUnsupportedErr(h.Name) + } + } + + uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + // Make the webhook request + client, err := invocation.Webhook.GetRESTClient(d.cm) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace := utiltrace.New("Call validating webhook", + utiltrace.Field{"configuration", invocation.Webhook.GetConfigurationName()}, + utiltrace.Field{"webhook", h.Name}, + utiltrace.Field{"resource", attr.GetResource()}, + utiltrace.Field{"subresource", attr.GetSubresource()}, + utiltrace.Field{"operation", attr.GetOperation()}, + utiltrace.Field{"UID", uid}) + defer trace.LogIfLong(500 * time.Millisecond) + + // if the webhook has a specific timeout, wrap the context to apply it + if h.TimeoutSeconds != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(*h.TimeoutSeconds)*time.Second) + defer cancel() + } + + r := client.Post().Body(request) + + // if the context has a deadline, set it as a parameter to inform the backend + if deadline, hasDeadline := ctx.Deadline(); hasDeadline { + // compute the timeout + if timeout := time.Until(deadline); timeout > 0 { + // if it's not an even number of seconds, round up to the nearest second + if truncated := timeout.Truncate(time.Second); truncated != timeout { + timeout = truncated + time.Second + } + // set the timeout + r.Timeout(timeout) + } + } + + if err := r.Do(ctx).Into(response); err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + trace.Step("Request completed") + + 
result, err := webhookrequest.VerifyAdmissionResponse(uid, false, response) + if err != nil { + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + for k, v := range result.AuditAnnotations { + key := h.Name + "/" + k + if err := attr.Attributes.AddAnnotation(key, v); err != nil { + klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) + } + } + for _, w := range result.Warnings { + warning.AddWarning(ctx, "", w) + } + if result.Allowed { + return nil + } + return &webhookutil.ErrWebhookRejection{Status: webhookerrors.ToStatusErr(h.Name, result.Result)} +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go new file mode 100644 index 000000000..ede53c668 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validating makes calls to validating (i.e., non-mutating) webhooks +// during the admission process. +package validating // import "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go new file mode 100644 index 000000000..6972877b1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validating + +import ( + "context" + "io" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" +) + +const ( + // PluginName indicates the name of admission plug-in + PluginName = "ValidatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewValidatingAdmissionWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) +} + +// Plugin is an implementation of admission.Interface. +type Plugin struct { + *generic.Webhook +} + +var _ admission.ValidationInterface = &Plugin{} + +// NewValidatingAdmissionWebhook returns a generic admission webhook plugin. 
+func NewValidatingAdmissionWebhook(configFile io.Reader) (*Plugin, error) { + handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) + p := &Plugin{} + var err error + p.Webhook, err = generic.NewWebhook(handler, configFile, configuration.NewValidatingWebhookConfigurationManager, newValidatingDispatcher(p)) + if err != nil { + return nil, err + } + return p, nil +} + +// Validate makes an admission decision based on the request attributes. +func (a *Plugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.Webhook.Dispatch(ctx, attr, o) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/vendor/k8s.io/apiserver/pkg/admission/plugins.go new file mode 100644 index 000000000..e6da6f4a7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -0,0 +1,208 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "sort" + "strings" + "sync" + + "k8s.io/klog/v2" +) + +// Factory is a function that returns an Interface for admission decisions. +// The config parameter provides an io.Reader handler to the factory in +// order to load specific configurations. If no configuration is provided +// the parameter is nil. +type Factory func(config io.Reader) (Interface, error) + +type Plugins struct { + lock sync.Mutex + registry map[string]Factory +} + +func NewPlugins() *Plugins { + return &Plugins{} +} + +// All registered admission options. +var ( + // PluginEnabledFn checks whether a plugin is enabled. By default, if you ask about it, it's enabled. + PluginEnabledFn = func(name string, config io.Reader) bool { + return true + } +) + +// PluginEnabledFunc is a function type that can provide an external check on whether an admission plugin may be enabled +type PluginEnabledFunc func(name string, config io.Reader) bool + +// Registered enumerates the names of all registered plugins. +func (ps *Plugins) Registered() []string { + ps.lock.Lock() + defer ps.lock.Unlock() + keys := []string{} + for k := range ps.registry { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// Register registers a plugin Factory by name. This +// is expected to happen during app startup. +func (ps *Plugins) Register(name string, plugin Factory) { + ps.lock.Lock() + defer ps.lock.Unlock() + if ps.registry != nil { + _, found := ps.registry[name] + if found { + klog.Fatalf("Admission plugin %q was registered twice", name) + } + } else { + ps.registry = map[string]Factory{} + } + + klog.V(1).Infof("Registered admission plugin %q", name) + ps.registry[name] = plugin +} + +// getPlugin creates an instance of the named plugin. It returns `false` if the +// the name is not known. The error is returned only when the named provider was +// known but failed to initialize. 
The config parameter specifies the io.Reader +// handler of the configuration file for the cloud provider, or nil for no configuration. +func (ps *Plugins) getPlugin(name string, config io.Reader) (Interface, bool, error) { + ps.lock.Lock() + defer ps.lock.Unlock() + f, found := ps.registry[name] + if !found { + return nil, false, nil + } + + config1, config2, err := splitStream(config) + if err != nil { + return nil, true, err + } + if !PluginEnabledFn(name, config1) { + return nil, true, nil + } + + ret, err := f(config2) + return ret, true, err +} + +// splitStream reads the stream bytes and constructs two copies of it. +func splitStream(config io.Reader) (io.Reader, io.Reader, error) { + if config == nil || reflect.ValueOf(config).IsNil() { + return nil, nil, nil + } + + configBytes, err := ioutil.ReadAll(config) + if err != nil { + return nil, nil, err + } + + return bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil +} + +// NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all +// the given plugins. +func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer, decorator Decorator) (Interface, error) { + handlers := []Interface{} + mutationPlugins := []string{} + validationPlugins := []string{} + for _, pluginName := range pluginNames { + pluginConfig, err := configProvider.ConfigFor(pluginName) + if err != nil { + return nil, err + } + + plugin, err := ps.InitPlugin(pluginName, pluginConfig, pluginInitializer) + if err != nil { + return nil, err + } + if plugin != nil { + if decorator != nil { + handlers = append(handlers, decorator.Decorate(plugin, pluginName)) + } else { + handlers = append(handlers, plugin) + } + + if _, ok := plugin.(MutationInterface); ok { + mutationPlugins = append(mutationPlugins, pluginName) + } + if _, ok := plugin.(ValidationInterface); ok { + validationPlugins = append(validationPlugins, pluginName) + } + } + } + if len(mutationPlugins) != 0 { + klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) + } + if len(validationPlugins) != 0 { + klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) + } + return newReinvocationHandler(chainAdmissionHandler(handlers)), nil +} + +// InitPlugin creates an instance of the named interface. +func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { + if name == "" { + klog.Info("No admission plugin specified.") + return nil, nil + } + + plugin, found, err := ps.getPlugin(name, config) + if err != nil { + return nil, fmt.Errorf("couldn't init admission plugin %q: %v", name, err) + } + if !found { + return nil, fmt.Errorf("unknown admission plugin: %s", name) + } + + pluginInitializer.Initialize(plugin) + // ensure that plugins have been properly initialized + if err := ValidateInitialization(plugin); err != nil { + return nil, fmt.Errorf("failed to initialize admission plugin %q: %v", name, err) + } + + return plugin, nil +} + +// ValidateInitialization will call the InitializationValidate function in each plugin if they implement +// the InitializationValidator interface. 
+func ValidateInitialization(plugin Interface) error { + if validater, ok := plugin.(InitializationValidator); ok { + err := validater.ValidateInitialization() + if err != nil { + return err + } + } + return nil +} + +type PluginInitializers []PluginInitializer + +func (pp PluginInitializers) Initialize(plugin Interface) { + for _, p := range pp { + p.Initialize(plugin) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go b/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go new file mode 100644 index 000000000..f93c703a1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "context" + +// newReinvocationHandler creates a handler that wraps the provided admission chain and reinvokes it +// if needed according to re-invocation policy of the webhooks. +func newReinvocationHandler(admissionChain Interface) Interface { + return &reinvoker{admissionChain} +} + +type reinvoker struct { + admissionChain Interface +} + +// Admit performs an admission control check using the wrapped admission chain, reinvoking the +// admission chain if needed according to the reinvocation policy. Plugins are expected to check +// the admission attributes' reinvocation context against their reinvocation policy to decide if +// they should re-run, and to update the reinvocation context if they perform any mutations. +func (r *reinvoker) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if mutator, ok := r.admissionChain.(MutationInterface); ok { + err := mutator.Admit(ctx, a, o) + if err != nil { + return err + } + s := a.GetReinvocationContext() + if s.ShouldReinvoke() { + s.SetIsReinvoke() + // Calling admit a second time will reinvoke all in-tree plugins + // as well as any webhook plugins that need to be reinvoked based on the + // reinvocation policy. + return mutator.Admit(ctx, a, o) + } + } + return nil +} + +// Validate performs an admission control check using the wrapped admission chain, and returns immediately on first error. +func (r *reinvoker) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { + if validator, ok := r.admissionChain.(ValidationInterface); ok { + return validator.Validate(ctx, a, o) + } + return nil +} + +// Handles will return true if any of the admission chain handlers handle the given operation. +func (r *reinvoker) Handles(operation Operation) bool { + return r.admissionChain.Handles(operation) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/util.go b/vendor/k8s.io/apiserver/pkg/admission/util.go new file mode 100644 index 000000000..842932f73 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/util.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import "k8s.io/apimachinery/pkg/runtime" + +type RuntimeObjectInterfaces struct { + runtime.ObjectCreater + runtime.ObjectTyper + runtime.ObjectDefaulter + runtime.ObjectConvertor + runtime.EquivalentResourceMapper +} + +func NewObjectInterfacesFromScheme(scheme *runtime.Scheme) ObjectInterfaces { + return &RuntimeObjectInterfaces{scheme, scheme, scheme, scheme, runtime.NewEquivalentResourceRegistry()} +} + +func (r *RuntimeObjectInterfaces) GetObjectCreater() runtime.ObjectCreater { + return r.ObjectCreater +} +func (r *RuntimeObjectInterfaces) GetObjectTyper() runtime.ObjectTyper { + return r.ObjectTyper +} +func (r *RuntimeObjectInterfaces) GetObjectDefaulter() runtime.ObjectDefaulter { + return r.ObjectDefaulter +} +func (r *RuntimeObjectInterfaces) GetObjectConvertor() runtime.ObjectConvertor { + return r.ObjectConvertor +} +func (r *RuntimeObjectInterfaces) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper { + return r.EquivalentResourceMapper +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index 4e4149441..197ce6537 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -62,7 +62,8 @@ type EgressSelectorConfiguration struct { // EgressSelection provides the configuration for a single egress selection client. type EgressSelection struct { // Name is the name of the egress selection. - // Currently supported values are "Master", "Etcd" and "Cluster" + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" Name string // Connection is the exact information used to configure the egress selection diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 7b9aacae8..49509bf22 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -62,7 +62,8 @@ type EgressSelectorConfiguration struct { // EgressSelection provides the configuration for a single egress selection client. type EgressSelection struct { // name is the name of the egress selection. - // Currently supported values are "Master", "Etcd" and "Cluster" + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" Name string `json:"name"` // connection is the exact information used to configure the egress selection diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go index 0a6fd0732..ea22b403a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -33,7 +33,8 @@ type EgressSelectorConfiguration struct { // EgressSelection provides the configuration for a single egress selection client. 
type EgressSelection struct { // name is the name of the egress selection. - // Currently supported values are "Master", "Etcd" and "Cluster" + // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" Name string `json:"name"` // connection is the exact information used to configure the egress selection diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go new file mode 100644 index 000000000..6e7d5bc82 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/v1" + "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + "k8s.io/apiserver/pkg/apis/audit/v1beta1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(audit.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index c7242222c..d142ee7e4 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apiserver.pkg.apis.audit.v1; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 2a0773d19..4d490ff96 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apiserver.pkg.apis.audit.v1alpha1; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index 23ed8910a..95bfb8cec 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. 
Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apiserver.pkg.apis.audit.v1beta1; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go new file mode 100644 index 000000000..0611c1ae5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/audit" +) + +// ValidatePolicy validates the audit policy +func ValidatePolicy(policy *audit.Policy) field.ErrorList { + var allErrs field.ErrorList + allErrs = append(allErrs, validateOmitStages(policy.OmitStages, field.NewPath("omitStages"))...) + rulePath := field.NewPath("rules") + for i, rule := range policy.Rules { + allErrs = append(allErrs, validatePolicyRule(rule, rulePath.Index(i))...) + } + return allErrs +} + +func validatePolicyRule(rule audit.PolicyRule, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + allErrs = append(allErrs, validateLevel(rule.Level, fldPath.Child("level"))...) + allErrs = append(allErrs, validateNonResourceURLs(rule.NonResourceURLs, fldPath.Child("nonResourceURLs"))...) + allErrs = append(allErrs, validateResources(rule.Resources, fldPath.Child("resources"))...) + allErrs = append(allErrs, validateOmitStages(rule.OmitStages, fldPath.Child("omitStages"))...) 
+ + if len(rule.NonResourceURLs) > 0 { + if len(rule.Resources) > 0 || len(rule.Namespaces) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nonResourceURLs"), rule.NonResourceURLs, "rules cannot apply to both regular resources and non-resource URLs")) + } + } + + return allErrs +} + +var validLevels = []string{ + string(audit.LevelNone), + string(audit.LevelMetadata), + string(audit.LevelRequest), + string(audit.LevelRequestResponse), +} + +var validOmitStages = []string{ + string(audit.StageRequestReceived), + string(audit.StageResponseStarted), + string(audit.StageResponseComplete), + string(audit.StagePanic), +} + +func validateLevel(level audit.Level, fldPath *field.Path) field.ErrorList { + switch level { + case audit.LevelNone, audit.LevelMetadata, audit.LevelRequest, audit.LevelRequestResponse: + return nil + case "": + return field.ErrorList{field.Required(fldPath, "")} + default: + return field.ErrorList{field.NotSupported(fldPath, level, validLevels)} + } +} + +func validateNonResourceURLs(urls []string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, url := range urls { + if url == "*" { + continue + } + + if !strings.HasPrefix(url, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL rules must begin with a '/' character")) + } + + if url != "" && strings.ContainsRune(url[:len(url)-1], '*') { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), url, "non-resource URL wildcards '*' must be the final character of the rule")) + } + } + return allErrs +} + +func validateResources(groupResources []audit.GroupResources, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for _, groupResource := range groupResources { + // The empty string represents the core API group. + if len(groupResource.Group) != 0 { + // Group names must be lower case and be valid DNS subdomains. + // reference: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + // an error is returned for group name like rbac.authorization.k8s.io/v1beta1 + // rbac.authorization.k8s.io is the valid one + if msgs := validation.NameIsDNSSubdomain(groupResource.Group, false); len(msgs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), groupResource.Group, strings.Join(msgs, ","))) + } + } + + if len(groupResource.ResourceNames) > 0 && len(groupResource.Resources) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceNames"), groupResource.ResourceNames, "using resourceNames requires at least one resource")) + } + } + return allErrs +} + +func validateOmitStages(omitStages []audit.Stage, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, stage := range omitStages { + valid := false + for _, validOmitStage := range validOmitStages { + if string(stage) == validOmitStage { + valid = true + break + } + } + if !valid { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), string(stage), "allowed stages are "+strings.Join(validOmitStages, ","))) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go new file mode 100644 index 000000000..338d4cebf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package config // import "k8s.io/apiserver/pkg/apis/config" diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/register.go new file mode 100644 index 000000000..6a0aae8e5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds this group to a scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package. +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/types.go new file mode 100644 index 000000000..5dddc97f9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. +type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration +} + +// AESConfiguration contains the API configuration for an AES transformer. +type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. + Keys []Key +} + +// SecretboxConfiguration contains the API configuration for an Secretbox transformer. +type SecretboxConfiguration struct { + // keys is a list of keys to be used for creating the Secretbox transformer. + // Each key has to be 32 bytes long. + Keys []Key +} + +// Key contains name and secret of the provided key for a transformer. +type Key struct { + // name is the name of the key to be used while storing data to disk. + Name string + // secret is the actual key, encoded in base64. + Secret string +} + +// String implements Stringer interface in a log safe way. +func (k Key) String() string { + return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name) +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // name is the name of the KMS plugin to be used. + Name string + // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000. + // Set to a negative value to disable caching. + // +optional + CacheSize *int32 + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string + // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds. + // +optional + Timeout *metav1.Duration +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go new file mode 100644 index 000000000..2d529651a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + defaultTimeout = &metav1.Duration{Duration: 3 * time.Second} + defaultCacheSize int32 = 1000 +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration. +func SetDefaults_KMSConfiguration(obj *KMSConfiguration) { + if obj.Timeout == nil { + obj.Timeout = defaultTimeout + } + + if obj.CacheSize == nil { + obj.CacheSize = &defaultCacheSize + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go new file mode 100644 index 000000000..b1a18ccab --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/config +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go new file mode 100644 index 000000000..32b5634c4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package. +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. 
+ SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme adds this group to a scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addDefaultingFuncs) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + // also register into the v1 group as EncryptionConfig (due to a docs bug) + scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{}) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go new file mode 100644 index 000000000..d7d68d258 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. +type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration `json:"resources"` +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string `json:"resources"` + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration `json:"providers"` +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration `json:"aesgcm,omitempty"` + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration `json:"aescbc,omitempty"` + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"` + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration `json:"identity,omitempty"` + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration `json:"kms,omitempty"` +} + +// AESConfiguration contains the API configuration for an AES transformer. 
+type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. + Keys []Key `json:"keys"` +} + +// SecretboxConfiguration contains the API configuration for an Secretbox transformer. +type SecretboxConfiguration struct { + // keys is a list of keys to be used for creating the Secretbox transformer. + // Each key has to be 32 bytes long. + Keys []Key `json:"keys"` +} + +// Key contains name and secret of the provided key for a transformer. +type Key struct { + // name is the name of the key to be used while storing data to disk. + Name string `json:"name"` + // secret is the actual key, encoded in base64. + Secret string `json:"secret"` +} + +// String implements Stringer interface in a log safe way. +func (k Key) String() string { + return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name) +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // name is the name of the KMS plugin to be used. + Name string `json:"name"` + // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000. + // Set to a negative value to disable caching. + // +optional + CacheSize *int32 `json:"cachesize,omitempty"` + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string `json:"endpoint"` + // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds. + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go new file mode 100644 index 000000000..c7de6539d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go @@ -0,0 +1,296 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + config "k8s.io/apiserver/pkg/apis/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*config.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AESConfiguration_To_config_AESConfiguration(a.(*AESConfiguration), b.(*config.AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_AESConfiguration_To_v1_AESConfiguration(a.(*config.AESConfiguration), b.(*AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*config.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*config.EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*config.EncryptionConfiguration), b.(*EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*config.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(a.(*IdentityConfiguration), b.(*config.IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*config.IdentityConfiguration), b.(*IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*config.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_KMSConfiguration_To_config_KMSConfiguration(a.(*KMSConfiguration), b.(*config.KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KMSConfiguration_To_v1_KMSConfiguration(a.(*config.KMSConfiguration), b.(*KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Key)(nil), (*config.Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Key_To_config_Key(a.(*Key), b.(*config.Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Key_To_v1_Key(a.(*config.Key), b.(*Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*config.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(a.(*ProviderConfiguration), b.(*config.ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*config.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*config.ProviderConfiguration), b.(*ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*config.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(a.(*ResourceConfiguration), b.(*config.ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*config.ResourceConfiguration), b.(*ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*config.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*config.SecretboxConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*config.SecretboxConfiguration), b.(*SecretboxConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_AESConfiguration_To_config_AESConfiguration is an autogenerated conversion function. +func Convert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + return autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in, out, s) +} + +func autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function. +func Convert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + return autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in, out, s) +} + +func autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]config.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration is an autogenerated conversion function. 
+func Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in, out, s) +} + +func autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function. +func Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s) +} + +func autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration is an autogenerated conversion function. +func Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in, out, s) +} + +func autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function. +func Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s) +} + +func autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_v1_KMSConfiguration_To_config_KMSConfiguration is an autogenerated conversion function. +func Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + return autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in, out, s) +} + +func autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_config_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function. 
+func Convert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + return autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in, out, s) +} + +func autoConvert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_v1_Key_To_config_Key is an autogenerated conversion function. +func Convert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + return autoConvert_v1_Key_To_config_Key(in, out, s) +} + +func autoConvert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_config_Key_To_v1_Key is an autogenerated conversion function. +func Convert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + return autoConvert_config_Key_To_v1_Key(in, out, s) +} + +func autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*config.AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*config.AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*config.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*config.IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*config.KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration is an autogenerated conversion function. +func Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + return autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in, out, s) +} + +func autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function. +func Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + return autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s) +} + +func autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]config.ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration is an autogenerated conversion function. 
+func Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + return autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in, out, s) +} + +func autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function. +func Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + return autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s) +} + +func autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration is an autogenerated conversion function. +func Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in, out, s) +} + +func autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function. +func Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..dcb4e8552 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go new file mode 100644 index 000000000..1c8db8d04 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. 
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) + return nil +} + +func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { + for i := range in.Resources { + a := &in.Resources[i] + for j := range a.Providers { + b := &a.Providers[j] + if b.KMS != nil { + SetDefaults_KMSConfiguration(b.KMS) + } + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go new file mode 100644 index 000000000..966ff1f0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation validates EncryptionConfiguration. +package validation + +import ( + "encoding/base64" + "fmt" + "net/url" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/config" +) + +const ( + moreThanOneElementErr = "more than one provider specified in a single element, should split into different list elements" + keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v" + unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported" + atLeastOneRequiredErrFmt = "at least one %s is required" + invalidURLErrFmt = "invalid endpoint for kms provider, error: parse %s: net/url: invalid control character in URL" + mandatoryFieldErrFmt = "%s is a mandatory field for a %s" + base64EncodingErr = "secrets must be base64 encoded" + zeroOrNegativeErrFmt = "%s should be a positive value" + nonZeroErrFmt = "%s should be a positive value, or negative to disable" + encryptionConfigNilErr = "EncryptionConfiguration can't be nil" +) + +var ( + aesKeySizes = []int{16, 24, 32} + // See https://golang.org/pkg/crypto/aes/#NewCipher for details on supported key sizes for AES. + secretBoxKeySizes = []int{32} + // See https://godoc.org/golang.org/x/crypto/nacl/secretbox#Open for details on the supported key sizes for Secretbox. + root = field.NewPath("resources") +) + +// ValidateEncryptionConfiguration validates a v1.EncryptionConfiguration. 
+func ValidateEncryptionConfiguration(c *config.EncryptionConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + if c == nil { + allErrs = append(allErrs, field.Required(root, "EncryptionConfiguration can't be nil")) + return allErrs + } + + if len(c.Resources) == 0 { + allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root))) + return allErrs + } + + for i, conf := range c.Resources { + r := root.Index(i).Child("resources") + p := root.Index(i).Child("providers") + + if len(conf.Resources) == 0 { + allErrs = append(allErrs, field.Required(r, fmt.Sprintf(atLeastOneRequiredErrFmt, r))) + } + + if len(conf.Providers) == 0 { + allErrs = append(allErrs, field.Required(p, fmt.Sprintf(atLeastOneRequiredErrFmt, p))) + } + + for j, provider := range conf.Providers { + path := p.Index(j) + allErrs = append(allErrs, validateSingleProvider(provider, path)...) + + switch { + case provider.KMS != nil: + allErrs = append(allErrs, validateKMSConfiguration(provider.KMS, path.Child("kms"))...) + case provider.AESGCM != nil: + allErrs = append(allErrs, validateKeys(provider.AESGCM.Keys, path.Child("aesgcm").Child("keys"), aesKeySizes)...) + case provider.AESCBC != nil: + allErrs = append(allErrs, validateKeys(provider.AESCBC.Keys, path.Child("aescbc").Child("keys"), aesKeySizes)...) + case provider.Secretbox != nil: + allErrs = append(allErrs, validateKeys(provider.Secretbox.Keys, path.Child("secretbox").Child("keys"), secretBoxKeySizes)...) + } + } + } + + return allErrs +} + +func validateSingleProvider(provider config.ProviderConfiguration, filedPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + found := 0 + + if provider.KMS != nil { + found++ + } + if provider.AESGCM != nil { + found++ + } + if provider.AESCBC != nil { + found++ + } + if provider.Secretbox != nil { + found++ + } + if provider.Identity != nil { + found++ + } + + if found == 0 { + return append(allErrs, field.Invalid(filedPath, provider, "provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity")) + } + + if found > 1 { + return append(allErrs, field.Invalid(filedPath, provider, moreThanOneElementErr)) + } + + return allErrs +} + +func validateKeys(keys []config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if len(keys) == 0 { + allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, "keys"))) + return allErrs + } + + for i, key := range keys { + allErrs = append(allErrs, validateKey(key, fieldPath.Index(i), expectedLen)...) 
+ } + + return allErrs +} + +func validateKey(key config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if key.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "key"))) + } + + if key.Secret == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("secret"), fmt.Sprintf(mandatoryFieldErrFmt, "secret", "key"))) + return allErrs + } + + secret, err := base64.StdEncoding.DecodeString(key.Secret) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", base64EncodingErr)) + return allErrs + } + + lenMatched := false + for _, l := range expectedLen { + if len(secret) == l { + lenMatched = true + break + } + } + + if !lenMatched { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", fmt.Sprintf(keyLenErrFmt, len(secret), expectedLen))) + } + + return allErrs +} + +func validateKMSConfiguration(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "provider"))) + } + allErrs = append(allErrs, validateKMSTimeout(c, fieldPath.Child("timeout"))...) + allErrs = append(allErrs, validateKMSEndpoint(c, fieldPath.Child("endpoint"))...) + allErrs = append(allErrs, validateKMSCacheSize(c, fieldPath.Child("cachesize"))...) + return allErrs +} + +func validateKMSCacheSize(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if *c.CacheSize == 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, fmt.Sprintf(nonZeroErrFmt, "cachesize"))) + } + + return allErrs +} + +func validateKMSTimeout(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Timeout.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Timeout, fmt.Sprintf(zeroOrNegativeErrFmt, "timeout"))) + } + + return allErrs +} + +func validateKMSEndpoint(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(c.Endpoint) == 0 { + return append(allErrs, field.Invalid(fieldPath, "", fmt.Sprintf(mandatoryFieldErrFmt, "endpoint", "kms"))) + } + + u, err := url.Parse(c.Endpoint) + if err != nil { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf("invalid endpoint for kms provider, error: %v", err))) + } + + if u.Scheme != "unix" { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(unsupportedSchemeErrFmt, u.Scheme))) + } + + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..dd66315ee --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. 
+func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go new file mode 100644 index 000000000..f3e9a1a7b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -0,0 +1,475 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bootstrap + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" +) + +// The objects that define an apiserver's initial behavior. The +// registered defaulting procedures make no changes to these +// particular objects (this is verified in the unit tests of the +// internalbootstrap package; it can not be verified in this package +// because that would require importing k8s.io/kubernetes). +var ( + MandatoryPriorityLevelConfigurations = []*flowcontrol.PriorityLevelConfiguration{ + MandatoryPriorityLevelConfigurationCatchAll, + MandatoryPriorityLevelConfigurationExempt, + } + MandatoryFlowSchemas = []*flowcontrol.FlowSchema{ + MandatoryFlowSchemaExempt, + MandatoryFlowSchemaCatchAll, + } +) + +// The objects that define the current suggested additional configuration +var ( + SuggestedPriorityLevelConfigurations = []*flowcontrol.PriorityLevelConfiguration{ + // "system" priority-level is for the system components that affects self-maintenance of the + // cluster and the availability of those running pods in the cluster, including kubelet and + // kube-proxy. + SuggestedPriorityLevelConfigurationSystem, + // "leader-election" is dedicated for controllers' leader-election, which majorly affects the + // availability of any controller runs in the cluster. + SuggestedPriorityLevelConfigurationLeaderElection, + // "workload-high" is used by those workloads with higher priority but their failure won't directly + // impact the existing running pods in the cluster, which includes kube-scheduler, and those well-known + // built-in workloads such as "deployments", "replicasets" and other low-level custom workload which + // is important for the cluster. + SuggestedPriorityLevelConfigurationWorkloadHigh, + // "workload-low" is used by those workloads with lower priority which availability only has a + // minor impact on the cluster. + SuggestedPriorityLevelConfigurationWorkloadLow, + // "global-default" serves the rest traffic not handled by the other suggested flow-schemas above. 
+ SuggestedPriorityLevelConfigurationGlobalDefault, + } + SuggestedFlowSchemas = []*flowcontrol.FlowSchema{ + SuggestedFlowSchemaSystemNodes, // references "system" priority-level + SuggestedFlowSchemaSystemLeaderElection, // references "leader-election" priority-level + SuggestedFlowSchemaWorkloadLeaderElection, // references "leader-election" priority-level + SuggestedFlowSchemaKubeControllerManager, // references "workload-high" priority-level + SuggestedFlowSchemaKubeScheduler, // references "workload-high" priority-level + SuggestedFlowSchemaKubeSystemServiceAccounts, // references "workload-high" priority-level + SuggestedFlowSchemaServiceAccounts, // references "workload-low" priority-level + SuggestedFlowSchemaGlobalDefault, // references "global-default" priority-level + } +) + +// Mandatory PriorityLevelConfiguration objects +var ( + MandatoryPriorityLevelConfigurationExempt = newPriorityLevelConfiguration( + flowcontrol.PriorityLevelConfigurationNameExempt, + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementExempt, + }, + ) + MandatoryPriorityLevelConfigurationCatchAll = newPriorityLevelConfiguration( + "catch-all", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 5, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeReject, + }, + }, + }) +) + +// Mandatory FlowSchema objects +var ( + // "exempt" priority-level is used for preventing priority inversion and ensuring that sysadmin + // requests are always possible. + MandatoryFlowSchemaExempt = newFlowSchema( + "exempt", + flowcontrol.PriorityLevelConfigurationNameExempt, + 1, // matchingPrecedence + "", // distinguisherMethodType + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.SystemPrivilegedGroup), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true, + ), + }, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}, + ), + }, + }, + ) + // "catch-all" priority-level only gets a minimal positive share of concurrency and won't be reaching + // ideally unless you intentionally deleted the suggested "global-default". 
+ MandatoryFlowSchemaCatchAll = newFlowSchema( + "catch-all", + "catch-all", + 10000, // matchingPrecedence + flowcontrol.FlowDistinguisherMethodByUserType, // distinguisherMethodType + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.AllUnauthenticated, user.AllAuthenticated), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true, + ), + }, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}, + ), + }, + }, + ) +) + +// Suggested PriorityLevelConfiguration objects +var ( + // system priority-level + SuggestedPriorityLevelConfigurationSystem = newPriorityLevelConfiguration( + "system", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 30, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 64, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // leader-election priority-level + SuggestedPriorityLevelConfigurationLeaderElection = newPriorityLevelConfiguration( + "leader-election", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 10, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 16, + HandSize: 4, + QueueLengthLimit: 50, + }, + }, + }, + }) + // workload-high priority-level + SuggestedPriorityLevelConfigurationWorkloadHigh = newPriorityLevelConfiguration( + "workload-high", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 40, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // workload-low priority-level + SuggestedPriorityLevelConfigurationWorkloadLow = newPriorityLevelConfiguration( + "workload-low", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 100, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) + // global-default priority-level + SuggestedPriorityLevelConfigurationGlobalDefault = newPriorityLevelConfiguration( + "global-default", + flowcontrol.PriorityLevelConfigurationSpec{ + Type: flowcontrol.PriorityLevelEnablementLimited, + Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ + AssuredConcurrencyShares: 20, + LimitResponse: flowcontrol.LimitResponse{ + Type: flowcontrol.LimitResponseTypeQueue, + Queuing: &flowcontrol.QueuingConfiguration{ + Queues: 128, + HandSize: 6, + QueueLengthLimit: 50, + }, + }, + }, + }) +) + +// Suggested FlowSchema objects +var ( + SuggestedFlowSchemaSystemNodes = newFlowSchema( + "system-nodes", "system", 500, + 
flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.NodesGroup), // the nodes group + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaSystemLeaderElection = newFlowSchema( + "system-leader-election", "leader-election", 100, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: append( + users(user.KubeControllerManager, user.KubeScheduler), + kubeSystemServiceAccount(flowcontrol.NameAll)...), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{"get", "create", "update"}, + []string{corev1.GroupName}, + []string{"endpoints", "configmaps"}, + []string{"kube-system"}, + false), + resourceRule( + []string{"get", "create", "update"}, + []string{coordinationv1.GroupName}, + []string{"leases"}, + []string{flowcontrol.NamespaceEvery}, + false), + }, + }, + ) + SuggestedFlowSchemaWorkloadLeaderElection = newFlowSchema( + "workload-leader-election", "leader-election", 200, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: kubeSystemServiceAccount(flowcontrol.NameAll), + ResourceRules: []flowcontrol.ResourcePolicyRule{ + resourceRule( + []string{"get", "create", "update"}, + []string{corev1.GroupName}, + []string{"endpoints", "configmaps"}, + []string{flowcontrol.NamespaceEvery}, + false), + resourceRule( + []string{"get", "create", "update"}, + []string{coordinationv1.GroupName}, + []string{"leases"}, + []string{flowcontrol.NamespaceEvery}, + false), + }, + }, + ) + SuggestedFlowSchemaKubeControllerManager = newFlowSchema( + "kube-controller-manager", "workload-high", 800, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: users(user.KubeControllerManager), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaKubeScheduler = newFlowSchema( + "kube-scheduler", "workload-high", 800, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: users(user.KubeScheduler), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaKubeSystemServiceAccounts = newFlowSchema( + "kube-system-service-accounts", "workload-high", 900, + flowcontrol.FlowDistinguisherMethodByNamespaceType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: kubeSystemServiceAccount(flowcontrol.NameAll), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + 
[]string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaServiceAccounts = newFlowSchema( + "service-accounts", "workload-low", 9000, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(serviceaccount.AllServiceAccountsGroup), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) + SuggestedFlowSchemaGlobalDefault = newFlowSchema( + "global-default", "global-default", 9900, + flowcontrol.FlowDistinguisherMethodByUserType, + flowcontrol.PolicyRulesWithSubjects{ + Subjects: groups(user.AllUnauthenticated, user.AllAuthenticated), + ResourceRules: []flowcontrol.ResourcePolicyRule{resourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.APIGroupAll}, + []string{flowcontrol.ResourceAll}, + []string{flowcontrol.NamespaceEvery}, + true)}, + NonResourceRules: []flowcontrol.NonResourcePolicyRule{ + nonResourceRule( + []string{flowcontrol.VerbAll}, + []string{flowcontrol.NonResourceAll}), + }, + }, + ) +) + +func newPriorityLevelConfiguration(name string, spec flowcontrol.PriorityLevelConfigurationSpec) *flowcontrol.PriorityLevelConfiguration { + return &flowcontrol.PriorityLevelConfiguration{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: spec} +} + +func newFlowSchema(name, plName string, matchingPrecedence int32, dmType flowcontrol.FlowDistinguisherMethodType, rules ...flowcontrol.PolicyRulesWithSubjects) *flowcontrol.FlowSchema { + var dm *flowcontrol.FlowDistinguisherMethod + if dmType != "" { + dm = &flowcontrol.FlowDistinguisherMethod{Type: dmType} + } + return &flowcontrol.FlowSchema{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: flowcontrol.FlowSchemaSpec{ + PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{ + Name: plName, + }, + MatchingPrecedence: matchingPrecedence, + DistinguisherMethod: dm, + Rules: rules}, + } + +} + +func groups(names ...string) []flowcontrol.Subject { + ans := make([]flowcontrol.Subject, len(names)) + for idx, name := range names { + ans[idx] = flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindGroup, + Group: &flowcontrol.GroupSubject{ + Name: name, + }, + } + } + return ans +} + +func users(names ...string) []flowcontrol.Subject { + ans := make([]flowcontrol.Subject, len(names)) + for idx, name := range names { + ans[idx] = flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindUser, + User: &flowcontrol.UserSubject{ + Name: name, + }, + } + } + return ans +} + +func kubeSystemServiceAccount(names ...string) []flowcontrol.Subject { + subjects := []flowcontrol.Subject{} + for _, name := range names { + subjects = append(subjects, flowcontrol.Subject{ + Kind: flowcontrol.SubjectKindServiceAccount, + ServiceAccount: &flowcontrol.ServiceAccountSubject{ + Name: name, + Namespace: metav1.NamespaceSystem, + }, + }) + } + return subjects +} + +func resourceRule(verbs []string, groups []string, resources []string, namespaces []string, clusterScoped bool) flowcontrol.ResourcePolicyRule { + return 
flowcontrol.ResourcePolicyRule{ + Verbs: verbs, + APIGroups: groups, + Resources: resources, + Namespaces: namespaces, + ClusterScope: clusterScoped, + } +} + +func nonResourceRule(verbs []string, nonResourceURLs []string) flowcontrol.NonResourcePolicyRule { + return flowcontrol.NonResourcePolicyRule{Verbs: verbs, NonResourceURLs: nonResourceURLs} +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go new file mode 100644 index 000000000..e9a901abf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go @@ -0,0 +1,219 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "strings" + + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + // DefaultAuditLevel is the default level to audit at, if no policy rules are matched. + DefaultAuditLevel = audit.LevelNone +) + +// Checker exposes methods for checking the policy rules. +type Checker interface { + // Check the audit level for a request with the given authorizer attributes. + LevelAndStages(authorizer.Attributes) (audit.Level, []audit.Stage) +} + +// NewChecker creates a new policy checker. +func NewChecker(policy *audit.Policy) Checker { + for i, rule := range policy.Rules { + policy.Rules[i].OmitStages = unionStages(policy.OmitStages, rule.OmitStages) + } + return &policyChecker{*policy} +} + +func unionStages(stageLists ...[]audit.Stage) []audit.Stage { + m := make(map[audit.Stage]bool) + for _, sl := range stageLists { + for _, s := range sl { + m[s] = true + } + } + result := make([]audit.Stage, 0, len(m)) + for key := range m { + result = append(result, key) + } + return result +} + +// FakeChecker creates a checker that returns a constant level for all requests (for testing). +func FakeChecker(level audit.Level, stage []audit.Stage) Checker { + return &fakeChecker{level, stage} +} + +type policyChecker struct { + audit.Policy +} + +func (p *policyChecker) LevelAndStages(attrs authorizer.Attributes) (audit.Level, []audit.Stage) { + for _, rule := range p.Rules { + if ruleMatches(&rule, attrs) { + return rule.Level, rule.OmitStages + } + } + return DefaultAuditLevel, p.OmitStages +} + +// Check whether the rule matches the request attrs. 
+func ruleMatches(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + user := attrs.GetUser() + if len(r.Users) > 0 { + if user == nil || !hasString(r.Users, user.GetName()) { + return false + } + } + if len(r.UserGroups) > 0 { + if user == nil { + return false + } + matched := false + for _, group := range user.GetGroups() { + if hasString(r.UserGroups, group) { + matched = true + break + } + } + if !matched { + return false + } + } + if len(r.Verbs) > 0 { + if !hasString(r.Verbs, attrs.GetVerb()) { + return false + } + } + + if len(r.Namespaces) > 0 || len(r.Resources) > 0 { + return ruleMatchesResource(r, attrs) + } + + if len(r.NonResourceURLs) > 0 { + return ruleMatchesNonResource(r, attrs) + } + + return true +} + +// Check whether the rule's non-resource URLs match the request attrs. +func ruleMatchesNonResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + if attrs.IsResourceRequest() { + return false + } + + path := attrs.GetPath() + for _, spec := range r.NonResourceURLs { + if pathMatches(path, spec) { + return true + } + } + + return false +} + +// Check whether the path matches the path specification. +func pathMatches(path, spec string) bool { + // Allow wildcard match + if spec == "*" { + return true + } + // Allow exact match + if spec == path { + return true + } + // Allow a trailing * subpath match + if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) { + return true + } + return false +} + +// Check whether the rule's resource fields match the request attrs. +func ruleMatchesResource(r *audit.PolicyRule, attrs authorizer.Attributes) bool { + if !attrs.IsResourceRequest() { + return false + } + + if len(r.Namespaces) > 0 { + if !hasString(r.Namespaces, attrs.GetNamespace()) { // Non-namespaced resources use the empty string. + return false + } + } + if len(r.Resources) == 0 { + return true + } + + apiGroup := attrs.GetAPIGroup() + resource := attrs.GetResource() + subresource := attrs.GetSubresource() + combinedResource := resource + // If subresource, the resource in the policy must match "(resource)/(subresource)" + if subresource != "" { + combinedResource = resource + "/" + subresource + } + + name := attrs.GetName() + + for _, gr := range r.Resources { + if gr.Group == apiGroup { + if len(gr.Resources) == 0 { + return true + } + for _, res := range gr.Resources { + if len(gr.ResourceNames) == 0 || hasString(gr.ResourceNames, name) { + // match "*" + if res == combinedResource || res == "*" { + return true + } + // match "*/subresource" + if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimPrefix(res, "*/") { + return true + } + // match "resource/*" + if strings.HasSuffix(res, "/*") && resource == strings.TrimSuffix(res, "/*") { + return true + } + } + } + } + } + return false +} + +// Utility function to check whether a string slice contains a string. +func hasString(slice []string, value string) bool { + for _, s := range slice { + if s == value { + return true + } + } + return false +} + +type fakeChecker struct { + level audit.Level + stage []audit.Stage +} + +func (f *fakeChecker) LevelAndStages(_ authorizer.Attributes) (audit.Level, []audit.Stage) { + return f.level, f.stage +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go new file mode 100644 index 000000000..e2b107b9f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "fmt" + + "k8s.io/apiserver/pkg/apis/audit" +) + +// EnforcePolicy drops any part of the event that doesn't conform to a policy level +// or omitStages and sets the event level accordingly +func EnforcePolicy(event *audit.Event, level audit.Level, omitStages []audit.Stage) (*audit.Event, error) { + for _, stage := range omitStages { + if event.Stage == stage { + return nil, nil + } + } + return enforceLevel(event, level) +} + +func enforceLevel(event *audit.Event, level audit.Level) (*audit.Event, error) { + switch level { + case audit.LevelMetadata: + event.Level = audit.LevelMetadata + event.ResponseObject = nil + event.RequestObject = nil + case audit.LevelRequest: + event.Level = audit.LevelRequest + event.ResponseObject = nil + case audit.LevelRequestResponse: + event.Level = audit.LevelRequestResponse + case audit.LevelNone: + return nil, nil + default: + return nil, fmt.Errorf("level unknown: %s", level) + } + return event, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go new file mode 100644 index 000000000..81b800fd6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go @@ -0,0 +1,90 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy + +import ( + "fmt" + "io/ioutil" + + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" + auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" + "k8s.io/apiserver/pkg/apis/audit/validation" + "k8s.io/apiserver/pkg/audit" + + "k8s.io/klog/v2" +) + +var ( + apiGroupVersions = []schema.GroupVersion{ + auditv1beta1.SchemeGroupVersion, + auditv1alpha1.SchemeGroupVersion, + auditv1.SchemeGroupVersion, + } + apiGroupVersionSet = map[schema.GroupVersion]bool{} +) + +func init() { + for _, gv := range apiGroupVersions { + apiGroupVersionSet[gv] = true + } +} + +func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { + if filePath == "" { + return nil, fmt.Errorf("file path not specified") + } + policyDef, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file path %q: %+v", filePath, err) + } + + ret, err := LoadPolicyFromBytes(policyDef) + if err != nil { + return nil, fmt.Errorf("%v: from file %v", err.Error(), filePath) + } + + return ret, nil +} + +func LoadPolicyFromBytes(policyDef []byte) (*auditinternal.Policy, error) { + policy := &auditinternal.Policy{} + decoder := audit.Codecs.UniversalDecoder(apiGroupVersions...) + + _, gvk, err := decoder.Decode(policyDef, nil, policy) + if err != nil { + return nil, fmt.Errorf("failed decoding: %v", err) + } + + // Ensure the policy file contained an apiVersion and kind. + if !apiGroupVersionSet[schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}] { + return nil, fmt.Errorf("unknown group version field %v in policy", gvk) + } + + if err := validation.ValidatePolicy(policy); err != nil { + return nil, err.ToAggregate() + } + + policyCnt := len(policy.Rules) + if policyCnt == 0 { + return nil, fmt.Errorf("loaded illegal policy with 0 rules") + } + klog.V(4).Infof("Loaded %d audit policy rules", policyCnt) + return policy, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/util.go b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go new file mode 100644 index 000000000..29be91230 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/apis/audit" +) + +// AllStages returns all possible stages +func AllStages() sets.String { + return sets.NewString( + audit.StageRequestReceived, + audit.StageResponseStarted, + audit.StageResponseComplete, + audit.StagePanic, + ) +} + +// AllLevels returns all possible levels +func AllLevels() sets.String { + return sets.NewString( + string(audit.LevelNone), + string(audit.LevelMetadata), + string(audit.LevelRequest), + string(audit.LevelRequestResponse), + ) +} + +// InvertStages subtracts the given array of stages from all stages +func InvertStages(stages []audit.Stage) []audit.Stage { + s := ConvertStagesToStrings(stages) + a := AllStages() + a.Delete(s...) + return ConvertStringSetToStages(a) +} + +// ConvertStagesToStrings converts an array of stages to a string array +func ConvertStagesToStrings(stages []audit.Stage) []string { + s := make([]string, len(stages)) + for i, stage := range stages { + s[i] = string(stage) + } + return s +} + +// ConvertStringSetToStages converts a string set to an array of stages +func ConvertStringSetToStages(set sets.String) []audit.Stage { + stages := make([]audit.Stage, len(set)) + for i, stage := range set.List() { + stages[i] = audit.Stage(stage) + } + return stages +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go index db4a5232e..205bf25c8 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -43,9 +43,9 @@ const ( userAgentTruncateSuffix = "...TRUNCATED" ) -func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { +func NewEventFromRequest(req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { ev := &auditinternal.Event{ - RequestReceivedTimestamp: metav1.NewMicroTime(time.Now()), + RequestReceivedTimestamp: metav1.NewMicroTime(requestReceivedTimestamp), Verb: attribs.GetVerb(), RequestURI: req.URL.RequestURI(), UserAgent: maybeTruncateUserAgent(req), diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index b9c7e2e6e..83697bb54 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -22,6 +22,7 @@ import ( "github.com/go-openapi/spec" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/group" "k8s.io/apiserver/pkg/authentication/request/anonymous" @@ -43,6 +44,11 @@ type DelegatingAuthenticatorConfig struct { // TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored. TokenAccessReviewClient authenticationclient.TokenReviewInterface + // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff + // CacheTTL is the length of time that a token authentication answer will be cached. 
CacheTTL time.Duration @@ -79,7 +85,10 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur } if c.TokenAccessReviewClient != nil { - tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences) + if c.WebhookRetryBackoff == nil { + return nil, nil, errors.New("retry backoff parameters for delegating authentication webhook has not been specified") + } + tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences, *c.WebhookRetryBackoff) if err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go index 2de796b72..292b4f57d 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go @@ -39,7 +39,7 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R if auth == "" { return nil, false, nil } - parts := strings.Split(auth, " ") + parts := strings.SplitN(auth, " ", 3) if len(parts) < 2 || strings.ToLower(parts[0]) != "bearer" { return nil, false, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index 6fe5299fc..011639140 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -19,8 +19,10 @@ package x509 import ( "crypto/x509" "crypto/x509/pkix" + "encoding/hex" "fmt" "net/http" + "strings" "time" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -82,6 +84,27 @@ func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Resp return f(chain) } +func columnSeparatedHex(d []byte) string { + h := strings.ToUpper(hex.EncodeToString(d)) + var sb strings.Builder + for i, r := range h { + sb.WriteRune(r) + if i%2 == 1 && i != len(h)-1 { + sb.WriteRune(':') + } + } + return sb.String() +} + +func certificateIdentifier(c *x509.Certificate) string { + return fmt.Sprintf( + "SN=%d, SKID=%s, AKID=%s", + c.SerialNumber, + columnSeparatedHex(c.SubjectKeyId), + columnSeparatedHex(c.AuthorityKeyId), + ) +} + // VerifyOptionFunc is function which provides a shallow copy of the VerifyOptions to the authenticator. This allows // for cases where the options (particularly the CAs) can change. If the bool is false, then the returned VerifyOptions // are ignored and the authenticator will express "no opinion". This allows a clear signal for cases where a CertPool @@ -129,7 +152,11 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R clientCertificateExpirationHistogram.Observe(remaining.Seconds()) chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) if err != nil { - return nil, false, err + return nil, false, fmt.Errorf( + "verifying certificate %s failed: %w", + certificateIdentifier(req.TLS.PeerCertificates[0]), + err, + ) } var errlist []error diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go new file mode 100644 index 000000000..f0dc07676 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Kubernetes Authors. 
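
The DelegatingAuthenticatorConfig hunk above now requires callers to supply a retry backoff whenever a token review client is configured. A minimal sketch; the backoff values, cache TTL and client parameter are illustrative assumptions, not values mandated by the patch.

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
	authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1"
)

// newDelegatingAuthenticator shows the new requirement: a non-nil WebhookRetryBackoff
// must accompany TokenAccessReviewClient, otherwise New() returns an error.
func newDelegatingAuthenticator(tokenClient authenticationclient.TokenReviewInterface) (authenticator.Request, error) {
	cfg := authenticatorfactory.DelegatingAuthenticatorConfig{
		TokenAccessReviewClient: tokenClient,
		WebhookRetryBackoff: &wait.Backoff{
			Duration: 500 * time.Millisecond,
			Factor:   1.5,
			Jitter:   0.2,
			Steps:    5,
		},
		CacheTTL: 10 * time.Second,
	}
	authn, _, err := cfg.New()
	return authn, err
}
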
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + + "k8s.io/klog/v2" +) + +const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ServiceAccountGroupPrefix = "system:serviceaccounts:" + AllServiceAccountsGroup = "system:serviceaccounts" + // PodNameKey is the key used in a user's "extra" to specify the pod name of + // the authenticating request. + PodNameKey = "authentication.kubernetes.io/pod-name" + // PodUIDKey is the key used in a user's "extra" to specify the pod UID of + // the authenticating request. + PodUIDKey = "authentication.kubernetes.io/pod-uid" +) + +// MakeUsername generates a username from the given namespace and ServiceAccount name. +// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name. +func MakeUsername(namespace, name string) string { + return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name +} + +// MatchesUsername checks whether the provided username matches the namespace and name without +// allocating. Use this when checking a service account namespace and name against a known string. 
+func MatchesUsername(namespace, name string, username string) bool { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + if !strings.HasPrefix(username, ServiceAccountUsernameSeparator) { + return false + } + username = username[len(ServiceAccountUsernameSeparator):] + + return username == name +} + +var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) + +// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, +// or an error if the username is not a valid name produced by MakeUsername +func SplitUsername(username string) (string, string, error) { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return "", "", invalidUsernameErr + } + trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) + parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) + if len(parts) != 2 { + return "", "", invalidUsernameErr + } + namespace, name := parts[0], parts[1] + if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 { + return "", "", invalidUsernameErr + } + if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 { + return "", "", invalidUsernameErr + } + return namespace, name, nil +} + +// MakeGroupNames generates service account group names for the given namespace +func MakeGroupNames(namespace string) []string { + return []string{ + AllServiceAccountsGroup, + MakeNamespaceGroupName(namespace), + } +} + +// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in +func MakeNamespaceGroupName(namespace string) string { + return ServiceAccountGroupPrefix + namespace +} + +// UserInfo returns a user.Info interface for the given namespace, service account name and UID +func UserInfo(namespace, name, uid string) user.Info { + return (&ServiceAccountInfo{ + Name: name, + Namespace: namespace, + UID: uid, + }).UserInfo() +} + +type ServiceAccountInfo struct { + Name, Namespace, UID string + PodName, PodUID string +} + +func (sa *ServiceAccountInfo) UserInfo() user.Info { + info := &user.DefaultInfo{ + Name: MakeUsername(sa.Namespace, sa.Name), + UID: sa.UID, + Groups: MakeGroupNames(sa.Namespace), + } + if sa.PodName != "" && sa.PodUID != "" { + info.Extra = map[string][]string{ + PodNameKey: {sa.PodName}, + PodUIDKey: {sa.PodUID}, + } + } + return info +} + +// IsServiceAccountToken returns true if the secret is a valid api token for the service account +func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { + if secret.Type != v1.SecretTypeServiceAccountToken { + return false + } + + name := secret.Annotations[v1.ServiceAccountNameKey] + uid := secret.Annotations[v1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} + +func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { + sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + if !apierrors.IsNotFound(err) { + return nil, err + } + + // Create the namespace if we can't verify it exists. 
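
A short sketch of the vendored serviceaccount username helpers above; the namespace, name and UID are illustrative.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/serviceaccount"
)

func main() {
	username := serviceaccount.MakeUsername("openshift-ingress", "router")
	fmt.Println(username) // system:serviceaccount:openshift-ingress:router

	// MatchesUsername avoids the allocations SplitUsername would make.
	fmt.Println(serviceaccount.MatchesUsername("openshift-ingress", "router", username)) // true

	ns, name, err := serviceaccount.SplitUsername(username)
	fmt.Println(ns, name, err) // openshift-ingress router <nil>

	info := serviceaccount.UserInfo("openshift-ingress", "router", "1234")
	fmt.Println(info.GetGroups()) // [system:serviceaccounts system:serviceaccounts:openshift-ingress]
}
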
+ // Tolerate errors, since we don't know whether this component has namespace creation permissions. + if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) + } + } + + // Create the service account + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + // If we're racing to init and someone else already created it, re-fetch + return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + } + return sa, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go index f02dc39ec..4d6ec0980 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go @@ -70,6 +70,7 @@ func (i *DefaultInfo) GetExtra() map[string][]string { const ( SystemPrivilegedGroup = "system:masters" NodesGroup = "system:nodes" + MonitoringGroup = "system:monitoring" AllUnauthenticated = "system:unauthenticated" AllAuthenticated = "system:authenticated" diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go index fa385e125..665483308 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -17,8 +17,10 @@ limitations under the License. package authorizerfactory import ( + "errors" "time" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/plugin/pkg/authorizer/webhook" authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -35,12 +37,22 @@ type DelegatingAuthorizerConfig struct { // DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached. // You generally want more responsive, "deny, try again" flows. DenyCacheTTL time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff } func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { + if c.WebhookRetryBackoff == nil { + return nil, errors.New("retry backoff parameters for delegating authorization webhook has not been specified") + } + return webhook.NewFromInterface( c.SubjectAccessReviewClient, c.AllowCacheTTL, c.DenyCacheTTL, + *c.WebhookRetryBackoff, ) } diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go new file mode 100644 index 000000000..654aaeb74 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. 
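
Analogous to the authenticator change, the DelegatingAuthorizerConfig hunk above makes the retry backoff mandatory. A sketch under the same caveats: backoff and TTL values are illustrative, and the client is supplied by the caller.

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/authorizerfactory"
	authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1"
)

// newDelegatingAuthorizer mirrors the authenticator case: as of this bump New()
// refuses a nil WebhookRetryBackoff.
func newDelegatingAuthorizer(sarClient authorizationclient.SubjectAccessReviewInterface) (authorizer.Authorizer, error) {
	cfg := authorizerfactory.DelegatingAuthorizerConfig{
		SubjectAccessReviewClient: sarClient,
		AllowCacheTTL:             10 * time.Second,
		DenyCacheTTL:              10 * time.Second,
		WebhookRetryBackoff:       &wait.Backoff{Duration: 500 * time.Millisecond, Factor: 1.5, Jitter: 0.2, Steps: 5},
	}
	return cfg.New()
}
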
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package path contains an authorizer that allows certain paths and path prefixes. +package path // import "k8s.io/apiserver/pkg/authorization/path" diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/path.go b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go new file mode 100644 index 000000000..03f524b38 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// NewAuthorizer returns an authorizer which accepts a given set of paths. +// Each path is either a fully matching path or it ends in * in case a prefix match is done. A leading / is optional. +func NewAuthorizer(alwaysAllowPaths []string) (authorizer.Authorizer, error) { + var prefixes []string + paths := sets.NewString() + for _, p := range alwaysAllowPaths { + p = strings.TrimPrefix(p, "/") + if len(p) == 0 { + // matches "/" + paths.Insert(p) + continue + } + if strings.ContainsRune(p[:len(p)-1], '*') { + return nil, fmt.Errorf("only trailing * allowed in %q", p) + } + if strings.HasSuffix(p, "*") { + prefixes = append(prefixes, p[:len(p)-1]) + } else { + paths.Insert(p) + } + } + + return authorizer.AuthorizerFunc(func(a authorizer.Attributes) (authorizer.Decision, string, error) { + if a.IsResourceRequest() { + return authorizer.DecisionNoOpinion, "", nil + } + + pth := strings.TrimPrefix(a.GetPath(), "/") + if paths.Has(pth) { + return authorizer.DecisionAllow, "", nil + } + + for _, prefix := range prefixes { + if strings.HasPrefix(pth, prefix) { + return authorizer.DecisionAllow, "", nil + } + } + + return authorizer.DecisionNoOpinion, "", nil + }), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go new file mode 100644 index 000000000..460d9a4ab --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go @@ -0,0 +1,106 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
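
A sketch of the new path authorizer added above; the allow-list and request attributes are illustrative.

package main

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/path"
)

func main() {
	// "*" is only allowed as a trailing wildcard; a leading "/" is optional.
	authz, err := path.NewAuthorizer([]string{"/healthz", "/livez/*", "/metrics"})
	if err != nil {
		panic(err)
	}

	attrs := authorizer.AttributesRecord{Path: "/livez/ping", ResourceRequest: false}
	decision, _, _ := authz.Authorize(context.Background(), attrs)
	fmt.Println(decision == authorizer.DecisionAllow) // true
}
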
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package union implements an authorizer that combines multiple subauthorizer. +// The union authorizer iterates over each subauthorizer and returns the first +// decision that is either an Allow decision or a Deny decision. If a +// subauthorizer returns a NoOpinion, then the union authorizer moves onto the +// next authorizer or, if the subauthorizer was the last authorizer, returns +// NoOpinion as the aggregate decision. I.e. union authorizer creates an +// aggregate decision and supports short-circuit allows and denies from +// subauthorizers. +package union + +import ( + "context" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// unionAuthzHandler authorizer against a chain of authorizer.Authorizer +type unionAuthzHandler []authorizer.Authorizer + +// New returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { + return unionAuthzHandler(authorizationHandlers) +} + +// Authorizes against a chain of authorizer.Authorizer objects and returns nil if successful and returns error if unsuccessful +func (authzHandler unionAuthzHandler) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { + var ( + errlist []error + reasonlist []string + ) + + for _, currAuthzHandler := range authzHandler { + decision, reason, err := currAuthzHandler.Authorize(ctx, a) + + if err != nil { + errlist = append(errlist, err) + } + if len(reason) != 0 { + reasonlist = append(reasonlist, reason) + } + switch decision { + case authorizer.DecisionAllow, authorizer.DecisionDeny: + return decision, reason, err + case authorizer.DecisionNoOpinion: + // continue to the next authorizer + } + } + + return authorizer.DecisionNoOpinion, strings.Join(reasonlist, "\n"), utilerrors.NewAggregate(errlist) +} + +// unionAuthzRulesHandler authorizer against a chain of authorizer.RuleResolver +type unionAuthzRulesHandler []authorizer.RuleResolver + +// NewRuleResolvers returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func NewRuleResolvers(authorizationHandlers ...authorizer.RuleResolver) authorizer.RuleResolver { + return unionAuthzRulesHandler(authorizationHandlers) +} + +// RulesFor against a chain of authorizer.RuleResolver objects and returns nil if successful and returns error if unsuccessful +func (authzHandler unionAuthzRulesHandler) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + var ( + errList []error + resourceRulesList []authorizer.ResourceRuleInfo + nonResourceRulesList []authorizer.NonResourceRuleInfo + ) + incompleteStatus := false + + for _, currAuthzHandler := range authzHandler { + resourceRules, nonResourceRules, incomplete, err := currAuthzHandler.RulesFor(user, namespace) + + if incomplete { + incompleteStatus = true + } + if err != nil { + errList = append(errList, err) + } + if len(resourceRules) > 0 { + resourceRulesList = 
append(resourceRulesList, resourceRules...) + } + if len(nonResourceRules) > 0 { + nonResourceRulesList = append(nonResourceRulesList, nonResourceRules...) + } + } + + return resourceRulesList, nonResourceRulesList, incompleteStatus, utilerrors.NewAggregate(errList) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go new file mode 100644 index 000000000..3d0123b23 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deprecation + +import ( + "fmt" + "regexp" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" +) + +type apiLifecycleDeprecated interface { + APILifecycleDeprecated() (major, minor int) +} + +type apiLifecycleRemoved interface { + APILifecycleRemoved() (major, minor int) +} + +type apiLifecycleReplacement interface { + APILifecycleReplacement() schema.GroupVersionKind +} + +// extract all digits at the beginning of the string +var leadingDigits = regexp.MustCompile(`^(\d+)`) + +// MajorMinor parses a numeric major/minor version from the provided version info. +// The minor version drops all characters after the first non-digit character: +// version.Info{Major:"1", Minor:"2+"} -> 1,2 +// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2 +func MajorMinor(v version.Info) (int, int, error) { + major, err := strconv.Atoi(v.Major) + if err != nil { + return 0, 0, err + } + minor, err := strconv.Atoi(leadingDigits.FindString(v.Minor)) + if err != nil { + return 0, 0, err + } + return major, minor, nil +} + +// IsDeprecated returns true if obj implements APILifecycleDeprecated() and returns +// a major/minor version that is non-zero and is <= the specified current major/minor version. +func IsDeprecated(obj runtime.Object, currentMajor, currentMinor int) bool { + deprecated, isDeprecated := obj.(apiLifecycleDeprecated) + if !isDeprecated { + return false + } + + deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated() + // no deprecation version expressed + if deprecatedMajor == 0 && deprecatedMinor == 0 { + return false + } + // no current version info available + if currentMajor == 0 && currentMinor == 0 { + return true + } + // compare deprecation version to current version + if deprecatedMajor > currentMajor { + return false + } + if deprecatedMajor == currentMajor && deprecatedMinor > currentMinor { + return false + } + return true +} + +// RemovedRelease returns the major/minor version in which the given object is unavailable (in the form ".") +// if the object implements APILifecycleRemoved() to indicate a non-zero removal version, and returns an empty string otherwise. 
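
A sketch of chaining sub-authorizers with the union package above; the two parameters stand in for whatever concrete authorizers a caller composes (for example the path and delegating authorizers sketched earlier) and are assumptions of this note.

package example

import (
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/union"
)

// buildAuthorizer chains sub-authorizers; the first Allow or Deny decision wins,
// while NoOpinion falls through to the next entry.
func buildAuthorizer(alwaysAllowPaths, delegated authorizer.Authorizer) authorizer.Authorizer {
	return union.New(alwaysAllowPaths, delegated)
}
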
+func RemovedRelease(obj runtime.Object) string { + if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo { + removedMajor, removedMinor := removed.APILifecycleRemoved() + if removedMajor != 0 || removedMinor != 0 { + return fmt.Sprintf("%d.%d", removedMajor, removedMinor) + } + } + return "" +} + +// WarningMessage returns a human-readable deprecation warning if the object implements APILifecycleDeprecated() +// to indicate a non-zero deprecated major/minor version and has a populated GetObjectKind().GroupVersionKind(). +func WarningMessage(obj runtime.Object) string { + deprecated, isDeprecated := obj.(apiLifecycleDeprecated) + if !isDeprecated { + return "" + } + + deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated() + if deprecatedMajor == 0 && deprecatedMinor == 0 { + return "" + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Empty() { + return "" + } + deprecationWarning := fmt.Sprintf("%s %s is deprecated in v%d.%d+", gvk.GroupVersion().String(), gvk.Kind, deprecatedMajor, deprecatedMinor) + + if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo { + removedMajor, removedMinor := removed.APILifecycleRemoved() + if removedMajor != 0 || removedMinor != 0 { + deprecationWarning = deprecationWarning + fmt.Sprintf(", unavailable in v%d.%d+", removedMajor, removedMinor) + } + } + + if replaced, hasReplacement := obj.(apiLifecycleReplacement); hasReplacement { + replacement := replaced.APILifecycleReplacement() + if !replacement.Empty() { + deprecationWarning = deprecationWarning + fmt.Sprintf("; use %s %s", replacement.GroupVersion().String(), replacement.Kind) + } + } + + return deprecationWarning +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go new file mode 100644 index 000000000..d175d15fe --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Addresses interface { + ServerAddressByClientCIDRs(net.IP) []metav1.ServerAddressByClientCIDR +} + +// DefaultAddresses is a default implementation of Addresses that will work in most cases +type DefaultAddresses struct { + // CIDRRules is a list of CIDRs and Addresses to use if a client is in the range + CIDRRules []CIDRRule + + // DefaultAddress is the address (hostname or IP and port) that should be used in + // if no CIDR matches more specifically. 
+ DefaultAddress string +} + +// CIDRRule is a rule for adding an alternate path to the master based on matching CIDR +type CIDRRule struct { + IPRange net.IPNet + + // Address is the address (hostname or IP and port) that should be used in + // if this CIDR matches + Address string +} + +func (d DefaultAddresses) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR { + addressCIDRMap := []metav1.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: d.DefaultAddress, + }, + } + + for _, rule := range d.CIDRRules { + addressCIDRMap = append(addressCIDRMap, rule.ServerAddressByClientCIDRs(clientIP)...) + } + return addressCIDRMap +} + +func (d CIDRRule) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR { + addressCIDRMap := []metav1.ServerAddressByClientCIDR{} + + if d.IPRange.Contains(clientIP) { + addressCIDRMap = append(addressCIDRMap, metav1.ServerAddressByClientCIDR{ + ClientCIDR: d.IPRange.String(), + ServerAddress: d.Address, + }) + } + return addressCIDRMap +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go new file mode 100644 index 000000000..7e9927a3a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// APIGroupHandler creates a webservice serving the supported versions, preferred version, and name +// of a group. E.g., such a web service will be registered at /apis/extensions. +type APIGroupHandler struct { + serializer runtime.NegotiatedSerializer + group metav1.APIGroup +} + +func NewAPIGroupHandler(serializer runtime.NegotiatedSerializer, group metav1.APIGroup) *APIGroupHandler { + if keepUnversioned(group.Name) { + // Because in release 1.1, /apis/extensions returns response with empty + // APIVersion, we use stripVersionNegotiatedSerializer to keep the + // response backwards compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + } + + return &APIGroupHandler{ + serializer: serializer, + group: group, + } +} + +func (s *APIGroupHandler) WebService() *restful.WebService { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws := new(restful.WebService) + ws.Path(APIGroupPrefix + "/" + s.group.Name) + ws.Doc("get information of a group") + ws.Route(ws.GET("/").To(s.handle). + Doc("get information of a group"). + Operation("getAPIGroup"). + Produces(mediaTypes...). + Consumes(mediaTypes...). + Writes(metav1.APIGroup{})) + return ws +} + +// handle returns a handler which will return the api.GroupAndVersion of the group. 
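
A sketch of the Addresses implementation above; the CIDR and server endpoints are illustrative.

package main

import (
	"fmt"
	"net"

	"k8s.io/apiserver/pkg/endpoints/discovery"
)

func main() {
	_, internal, _ := net.ParseCIDR("10.0.0.0/8")
	addrs := discovery.DefaultAddresses{
		DefaultAddress: "api.example.com:6443",
		CIDRRules: []discovery.CIDRRule{
			{IPRange: *internal, Address: "10.0.0.1:6443"},
		},
	}
	// Clients inside 10.0.0.0/8 see both the default and the in-range address.
	for _, a := range addrs.ServerAddressByClientCIDRs(net.ParseIP("10.1.2.3")) {
		fmt.Println(a.ClientCIDR, "->", a.ServerAddress)
	}
}
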
+func (s *APIGroupHandler) handle(req *restful.Request, resp *restful.Response) { + s.ServeHTTP(resp.ResponseWriter, req.Request) +} + +func (s *APIGroupHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, &s.group) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go new file mode 100644 index 000000000..b33ecec65 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// legacyRootAPIHandler creates a webservice serving api group discovery. +type legacyRootAPIHandler struct { + // addresses is used to build cluster IPs for discovery. + addresses Addresses + apiPrefix string + serializer runtime.NegotiatedSerializer +} + +func NewLegacyRootAPIHandler(addresses Addresses, serializer runtime.NegotiatedSerializer, apiPrefix string) *legacyRootAPIHandler { + // Because in release 1.1, /apis returns response with empty APIVersion, we + // use stripVersionNegotiatedSerializer to keep the response backwards + // compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + + return &legacyRootAPIHandler{ + addresses: addresses, + apiPrefix: apiPrefix, + serializer: serializer, + } +} + +// AddApiWebService adds a service to return the supported api versions at the legacy /api. +func (s *legacyRootAPIHandler) WebService() *restful.WebService { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws := new(restful.WebService) + ws.Path(s.apiPrefix) + ws.Doc("get available API versions") + ws.Route(ws.GET("/").To(s.handle). + Doc("get available API versions"). + Operation("getAPIVersions"). + Produces(mediaTypes...). + Consumes(mediaTypes...). 
+ Writes(metav1.APIVersions{})) + return ws +} + +func (s *legacyRootAPIHandler) handle(req *restful.Request, resp *restful.Response) { + clientIP := utilnet.GetClientIP(req.Request) + apiVersions := &metav1.APIVersions{ + ServerAddressByClientCIDRs: s.addresses.ServerAddressByClientCIDRs(clientIP), + Versions: []string{"v1"}, + } + + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp.ResponseWriter, req.Request, http.StatusOK, apiVersions) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go new file mode 100644 index 000000000..beba9c8a4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go @@ -0,0 +1,135 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + "sync" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// GroupManager is an interface that allows dynamic mutation of the existing webservice to handle +// API groups being added or removed. +type GroupManager interface { + AddGroup(apiGroup metav1.APIGroup) + RemoveGroup(groupName string) + + WebService() *restful.WebService +} + +// rootAPIsHandler creates a webservice serving api group discovery. +// The list of APIGroups may change while the server is running because additional resources +// are registered or removed. It is not safe to cache the values. +type rootAPIsHandler struct { + // addresses is used to build cluster IPs for discovery. + addresses Addresses + + serializer runtime.NegotiatedSerializer + + // Map storing information about all groups to be exposed in discovery response. + // The map is from name to the group. + lock sync.RWMutex + apiGroups map[string]metav1.APIGroup + // apiGroupNames preserves insertion order + apiGroupNames []string +} + +func NewRootAPIsHandler(addresses Addresses, serializer runtime.NegotiatedSerializer) *rootAPIsHandler { + // Because in release 1.1, /apis returns response with empty APIVersion, we + // use stripVersionNegotiatedSerializer to keep the response backwards + // compatible. 
+ serializer = stripVersionNegotiatedSerializer{serializer} + + return &rootAPIsHandler{ + addresses: addresses, + serializer: serializer, + apiGroups: map[string]metav1.APIGroup{}, + } +} + +func (s *rootAPIsHandler) AddGroup(apiGroup metav1.APIGroup) { + s.lock.Lock() + defer s.lock.Unlock() + + _, alreadyExists := s.apiGroups[apiGroup.Name] + + s.apiGroups[apiGroup.Name] = apiGroup + if !alreadyExists { + s.apiGroupNames = append(s.apiGroupNames, apiGroup.Name) + } +} + +func (s *rootAPIsHandler) RemoveGroup(groupName string) { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.apiGroups, groupName) + for i := range s.apiGroupNames { + if s.apiGroupNames[i] == groupName { + s.apiGroupNames = append(s.apiGroupNames[:i], s.apiGroupNames[i+1:]...) + break + } + } +} + +func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + s.lock.RLock() + defer s.lock.RUnlock() + + orderedGroups := []metav1.APIGroup{} + for _, groupName := range s.apiGroupNames { + orderedGroups = append(orderedGroups, s.apiGroups[groupName]) + } + + clientIP := utilnet.GetClientIP(req) + serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP) + groups := make([]metav1.APIGroup, len(orderedGroups)) + for i := range orderedGroups { + groups[i] = orderedGroups[i] + groups[i].ServerAddressByClientCIDRs = serverCIDR + } + + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &metav1.APIGroupList{Groups: groups}) +} + +func (s *rootAPIsHandler) restfulHandle(req *restful.Request, resp *restful.Response) { + s.ServeHTTP(resp.ResponseWriter, req.Request) +} + +// WebService returns a webservice serving api group discovery. +// Note: during the server runtime apiGroups might change. +func (s *rootAPIsHandler) WebService() *restful.WebService { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws := new(restful.WebService) + ws.Path(APIGroupPrefix) + ws.Doc("get available API versions") + ws.Route(ws.GET("/").To(s.restfulHandle). + Doc("get available API versions"). + Operation("getAPIVersions"). + Produces(mediaTypes...). + Consumes(mediaTypes...). + Writes(metav1.APIGroupList{})) + return ws +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go new file mode 100644 index 000000000..a1b00decb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "crypto/sha256" + "encoding/base64" +) + +// StorageVersionHash calculates the storage version hash for a +// tuple. +// WARNING: this function is subject to change. Clients shouldn't depend on +// this function. 
+func StorageVersionHash(group, version, kind string) string { + gvk := group + "/" + version + "/" + kind + if gvk == "" { + return "" + } + bytes := sha256.Sum256([]byte(gvk)) + // Assuming there are N kinds in the cluster, and the hash is X-byte long, + // the chance of colliding hash P(N,X) approximates to 1-e^(-(N^2)/2^(8X+1)). + // P(10,000, 8) ~= 2.7*10^(-12), which is low enough. + // See https://en.wikipedia.org/wiki/Birthday_problem#Approximations. + return base64.StdEncoding.EncodeToString( + bytes[:8]) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go new file mode 100644 index 000000000..7487ffc18 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" +) + +const APIGroupPrefix = "/apis" + +func keepUnversioned(group string) bool { + return group == "" || group == "extensions" +} + +// stripVersionEncoder strips APIVersion field from the encoding output. It's +// used to keep the responses at the discovery endpoints backward compatible +// with release-1.1, when the responses have empty APIVersion. +type stripVersionEncoder struct { + encoder runtime.Encoder + serializer runtime.Serializer + identifier runtime.Identifier +} + +func newStripVersionEncoder(e runtime.Encoder, s runtime.Serializer) runtime.Encoder { + return stripVersionEncoder{ + encoder: e, + serializer: s, + identifier: identifier(e), + } +} + +func identifier(e runtime.Encoder) runtime.Identifier { + result := map[string]string{ + "name": "stripVersion", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for stripVersionEncoder: %v", err) + } + return runtime.Identifier(identifier) +} + +func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c stripVersionEncoder) doEncode(obj runtime.Object, w io.Writer) error { + buf := bytes.NewBuffer([]byte{}) + err := c.encoder.Encode(obj, buf) + if err != nil { + return err + } + roundTrippedObj, gvk, err := c.serializer.Decode(buf.Bytes(), nil, nil) + if err != nil { + return err + } + gvk.Group = "" + gvk.Version = "" + roundTrippedObj.GetObjectKind().SetGroupVersionKind(*gvk) + return c.serializer.Encode(roundTrippedObj, w) +} + +// Identifier implements runtime.Encoder interface. +func (c stripVersionEncoder) Identifier() runtime.Identifier { + return c.identifier +} + +// stripVersionNegotiatedSerializer will return stripVersionEncoder when +// EncoderForVersion is called. See comments for stripVersionEncoder. 
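
Two small sketches of the discovery surface added above: registering a group with the root /apis handler, and the storage version hash helper. The group name, serializer and mux are placeholders, not values taken from the patch.

package example

import (
	"net/http"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/endpoints/discovery"
)

// registerDiscovery wires the root /apis handler and shows the hash helper.
func registerDiscovery(mux *http.ServeMux, codecs runtime.NegotiatedSerializer, addrs discovery.Addresses) {
	root := discovery.NewRootAPIsHandler(addrs, codecs)
	root.AddGroup(metav1.APIGroup{
		Name: "route.openshift.io",
		Versions: []metav1.GroupVersionForDiscovery{
			{GroupVersion: "route.openshift.io/v1", Version: "v1"},
		},
	})
	mux.Handle("/apis", root)

	// Opaque, stable identifier derived from sha256("apps/v1/Deployment").
	_ = discovery.StorageVersionHash("apps", "v1", "Deployment")
}
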
+type stripVersionNegotiatedSerializer struct { + runtime.NegotiatedSerializer +} + +func (n stripVersionNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + serializer, ok := encoder.(runtime.Serializer) + if !ok { + // The stripVersionEncoder needs both an encoder and decoder, but is called from a context that doesn't have access to the + // decoder. We do a best effort cast here (since this code path is only for backwards compatibility) to get access to the caller's + // decoder. + panic(fmt.Sprintf("Unable to extract serializer from %#v", encoder)) + } + versioned := n.NegotiatedSerializer.EncoderForVersion(encoder, gv) + return newStripVersionEncoder(versioned, serializer) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go new file mode 100644 index 000000000..0976041bf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +type APIResourceLister interface { + ListAPIResources() []metav1.APIResource +} + +type APIResourceListerFunc func() []metav1.APIResource + +func (f APIResourceListerFunc) ListAPIResources() []metav1.APIResource { + return f() +} + +// APIVersionHandler creates a webservice serving the supported resources for the version +// E.g., such a web service will be registered at /apis/extensions/v1beta1. +type APIVersionHandler struct { + serializer runtime.NegotiatedSerializer + + groupVersion schema.GroupVersion + apiResourceLister APIResourceLister +} + +func NewAPIVersionHandler(serializer runtime.NegotiatedSerializer, groupVersion schema.GroupVersion, apiResourceLister APIResourceLister) *APIVersionHandler { + if keepUnversioned(groupVersion.Group) { + // Because in release 1.1, /apis/extensions returns response with empty + // APIVersion, we use stripVersionNegotiatedSerializer to keep the + // response backwards compatible. + serializer = stripVersionNegotiatedSerializer{serializer} + } + + return &APIVersionHandler{ + serializer: serializer, + groupVersion: groupVersion, + apiResourceLister: apiResourceLister, + } +} + +func (s *APIVersionHandler) AddToWebService(ws *restful.WebService) { + mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer) + ws.Route(ws.GET("/").To(s.handle). + Doc("get available resources"). + Operation("getAPIResources"). + Produces(mediaTypes...). + Consumes(mediaTypes...). + Writes(metav1.APIResourceList{})) +} + +// handle returns a handler which will return the api.VersionAndVersion of the group. 
+func (s *APIVersionHandler) handle(req *restful.Request, resp *restful.Response) { + s.ServeHTTP(resp.ResponseWriter, req.Request) +} + +func (s *APIVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, + &metav1.APIResourceList{GroupVersion: s.groupVersion.String(), APIResources: s.apiResourceLister.ListAPIResources()}) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/doc.go new file mode 100644 index 000000000..ef99114b6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package endpoints contains the generic code that provides a RESTful Kubernetes-style API service. +package endpoints // import "k8s.io/apiserver/pkg/endpoints" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go new file mode 100644 index 000000000..04264230d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filterlatency + +import ( + "context" + "net/http" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +type requestFilterRecordKeyType int + +// requestFilterRecordKey is the context key for a request filter record struct. +const requestFilterRecordKey requestFilterRecordKeyType = iota + +type requestFilterRecord struct { + name string + startedTimestamp time.Time +} + +// withRequestFilterRecord attaches the given request filter record to the parent context. +func withRequestFilterRecord(parent context.Context, fr *requestFilterRecord) context.Context { + return apirequest.WithValue(parent, requestFilterRecordKey, fr) +} + +// requestFilterRecordFrom returns the request filter record from the given context. +func requestFilterRecordFrom(ctx context.Context) *requestFilterRecord { + fr, _ := ctx.Value(requestFilterRecordKey).(*requestFilterRecord) + return fr +} + +// TrackStarted measures the timestamp the given handler has started execution +// by attaching a handler to the chain. 
+func TrackStarted(handler http.Handler, name string) http.Handler { + return trackStarted(handler, name, utilclock.RealClock{}) +} + +// TrackCompleted measures the timestamp the given handler has completed execution and then +// it updates the corresponding metric with the filter latency duration. +func TrackCompleted(handler http.Handler) http.Handler { + return trackCompleted(handler, utilclock.RealClock{}, func(fr *requestFilterRecord, completedAt time.Time) { + metrics.RecordFilterLatency(fr.name, completedAt.Sub(fr.startedTimestamp)) + }) +} + +func trackStarted(handler http.Handler, name string, clock utilclock.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if fr := requestFilterRecordFrom(ctx); fr != nil { + fr.name = name + fr.startedTimestamp = clock.Now() + + handler.ServeHTTP(w, r) + return + } + + fr := &requestFilterRecord{ + name: name, + startedTimestamp: clock.Now(), + } + r = r.WithContext(withRequestFilterRecord(ctx, fr)) + handler.ServeHTTP(w, r) + }) +} + +func trackCompleted(handler http.Handler, clock utilclock.PassiveClock, action func(*requestFilterRecord, time.Time)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // The previous filter has just completed. + completedAt := clock.Now() + + defer handler.ServeHTTP(w, r) + + ctx := r.Context() + if fr := requestFilterRecordFrom(ctx); fr != nil { + action(fr, completedAt) + } + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS new file mode 100644 index 000000000..05259ce35 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- deads2k +- sttts +- soltysh diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go new file mode 100644 index 000000000..891d60935 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -0,0 +1,256 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "bufio" + "errors" + "fmt" + "net" + "net/http" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithAudit decorates a http.Handler with audit logging information for all the +// requests coming to the server. Audit level is decided according to requests' +// attributes and audit policy. Logs are emitted to the audit sink to +// process events. If sink or audit policy is nil, no decoration takes place. 
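
The filterlatency helpers above are intended to sandwich a single filter in the handler chain so its latency is recorded under a name. A sketch of that wiring; the "authorization" label and the filter constructor are assumptions of this note.

package example

import (
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/filterlatency"
)

// instrument wraps one filter between TrackStarted and TrackCompleted.
func instrument(apiHandler http.Handler, withAuthorization func(http.Handler) http.Handler) http.Handler {
	handler := filterlatency.TrackCompleted(apiHandler) // fires when the filter above hands off
	handler = withAuthorization(handler)
	handler = filterlatency.TrackStarted(handler, "authorization") // stamps name and start time
	return handler
}
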
+func WithAudit(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunningCheck request.LongRunningRequestCheck) http.Handler { + if sink == nil || policy == nil { + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) + responsewriters.InternalError(w, req, errors.New("failed to create audit event")) + return + } + ctx := req.Context() + if ev == nil || ctx == nil { + handler.ServeHTTP(w, req) + return + } + + ev.Stage = auditinternal.StageRequestReceived + if processed := processAuditEvent(sink, ev, omitStages); !processed { + audit.ApiserverAuditDroppedCounter.Inc() + responsewriters.InternalError(w, req, errors.New("failed to store audit event")) + return + } + + // intercept the status code + var longRunningSink audit.Sink + if longRunningCheck != nil { + ri, _ := request.RequestInfoFrom(ctx) + if longRunningCheck(req, ri) { + longRunningSink = sink + } + } + respWriter := decorateResponseWriter(w, ev, longRunningSink, omitStages) + + // send audit event when we leave this func, either via a panic or cleanly. In the case of long + // running requests, this will be the second audit event. + defer func() { + if r := recover(); r != nil { + defer panic(r) + ev.Stage = auditinternal.StagePanic + ev.ResponseStatus = &metav1.Status{ + Code: http.StatusInternalServerError, + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonInternalError, + Message: fmt.Sprintf("APIServer panic'd: %v", r), + } + processAuditEvent(sink, ev, omitStages) + return + } + + // if no StageResponseStarted event was sent b/c neither a status code nor a body was sent, fake it here + // But Audit-Id http header will only be sent when http.ResponseWriter.WriteHeader is called. + fakedSuccessStatus := &metav1.Status{ + Code: http.StatusOK, + Status: metav1.StatusSuccess, + Message: "Connection closed early", + } + if ev.ResponseStatus == nil && longRunningSink != nil { + ev.ResponseStatus = fakedSuccessStatus + ev.Stage = auditinternal.StageResponseStarted + processAuditEvent(longRunningSink, ev, omitStages) + } + + ev.Stage = auditinternal.StageResponseComplete + if ev.ResponseStatus == nil { + ev.ResponseStatus = fakedSuccessStatus + } + processAuditEvent(sink, ev, omitStages) + }() + handler.ServeHTTP(respWriter, req) + }) +} + +// createAuditEventAndAttachToContext is responsible for creating the audit event +// and attaching it to the appropriate request context. It returns: +// - context with audit event attached to it +// - created audit event +// - error if anything bad happened +func createAuditEventAndAttachToContext(req *http.Request, policy policy.Checker) (*http.Request, *auditinternal.Event, []auditinternal.Stage, error) { + ctx := req.Context() + + attribs, err := GetAuthorizerAttributes(ctx) + if err != nil { + return req, nil, nil, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err) + } + + level, omitStages := policy.LevelAndStages(attribs) + audit.ObservePolicyLevel(level) + if level == auditinternal.LevelNone { + // Don't audit. 
+ return req, nil, nil, nil + } + + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) + if !ok { + requestReceivedTimestamp = time.Now() + } + ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, level, attribs) + if err != nil { + return req, nil, nil, fmt.Errorf("failed to complete audit event from request: %v", err) + } + + req = req.WithContext(request.WithAuditEvent(ctx, ev)) + + return req, ev, omitStages, nil +} + +func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool { + for _, stage := range omitStages { + if ev.Stage == stage { + return true + } + } + + if ev.Stage == auditinternal.StageRequestReceived { + ev.StageTimestamp = metav1.NewMicroTime(ev.RequestReceivedTimestamp.Time) + } else { + ev.StageTimestamp = metav1.NewMicroTime(time.Now()) + } + audit.ObserveEvent() + return sink.ProcessEvents(ev) +} + +func decorateResponseWriter(responseWriter http.ResponseWriter, ev *auditinternal.Event, sink audit.Sink, omitStages []auditinternal.Stage) http.ResponseWriter { + delegate := &auditResponseWriter{ + ResponseWriter: responseWriter, + event: ev, + sink: sink, + omitStages: omitStages, + } + + // check if the ResponseWriter we're wrapping is the fancy one we need + // or if the basic is sufficient + _, cn := responseWriter.(http.CloseNotifier) + _, fl := responseWriter.(http.Flusher) + _, hj := responseWriter.(http.Hijacker) + if cn && fl && hj { + return &fancyResponseWriterDelegator{delegate} + } + return delegate +} + +var _ http.ResponseWriter = &auditResponseWriter{} + +// auditResponseWriter intercepts WriteHeader, sets it in the event. If the sink is set, it will +// create immediately an event (for long running requests). +type auditResponseWriter struct { + http.ResponseWriter + event *auditinternal.Event + once sync.Once + sink audit.Sink + omitStages []auditinternal.Stage +} + +func (a *auditResponseWriter) setHttpHeader() { + a.ResponseWriter.Header().Set(auditinternal.HeaderAuditID, string(a.event.AuditID)) +} + +func (a *auditResponseWriter) processCode(code int) { + a.once.Do(func() { + if a.event.ResponseStatus == nil { + a.event.ResponseStatus = &metav1.Status{} + } + a.event.ResponseStatus.Code = int32(code) + a.event.Stage = auditinternal.StageResponseStarted + + if a.sink != nil { + processAuditEvent(a.sink, a.event, a.omitStages) + } + }) +} + +func (a *auditResponseWriter) Write(bs []byte) (int, error) { + // the Go library calls WriteHeader internally if no code was written yet. But this will go unnoticed for us + a.processCode(http.StatusOK) + a.setHttpHeader() + return a.ResponseWriter.Write(bs) +} + +func (a *auditResponseWriter) WriteHeader(code int) { + a.processCode(code) + a.setHttpHeader() + a.ResponseWriter.WriteHeader(code) +} + +// fancyResponseWriterDelegator implements http.CloseNotifier, http.Flusher and +// http.Hijacker which are needed to make certain http operation (e.g. watch, rsh, etc) +// working. 
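
A sketch of how the audit filter above is wired into a handler chain. policy.NewChecker is assumed to be the checker constructor provided elsewhere in the vendored policy package; the sink and long-running check are supplied by the caller.

package example

import (
	"net/http"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/audit/policy"
	"k8s.io/apiserver/pkg/endpoints/filters"
	"k8s.io/apiserver/pkg/endpoints/request"
)

// withAuditing feeds a loaded policy and a sink into WithAudit, which emits
// RequestReceived/ResponseStarted/ResponseComplete/Panic stage events.
func withAuditing(apiHandler http.Handler, sink audit.Sink, p *auditinternal.Policy, longRunning request.LongRunningRequestCheck) http.Handler {
	checker := policy.NewChecker(p) // assumed constructor in the vendored policy package
	return filters.WithAudit(apiHandler, sink, checker, longRunning)
}
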
+type fancyResponseWriterDelegator struct { + *auditResponseWriter +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + // fake a response status before protocol switch happens + f.processCode(http.StatusSwitchingProtocols) + + // This will be ignored if WriteHeader() function has already been called. + // It's not guaranteed Audit-ID http header is sent for all requests. + // For example, when user run "kubectl exec", apiserver uses a proxy handler + // to deal with the request, users can only get http headers returned by kubelet node. + f.setHttpHeader() + + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +var _ http.CloseNotifier = &fancyResponseWriterDelegator{} +var _ http.Flusher = &fancyResponseWriterDelegator{} +var _ http.Hijacker = &fancyResponseWriterDelegator{} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go new file mode 100644 index 000000000..22b276991 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" +) + +// WithAuditAnnotations decorates a http.Handler with a []{key, value} that is merged +// with the audit.Event.Annotations map. This allows layers that run before WithAudit +// (such as authentication) to assert annotations. +// If sink or audit policy is nil, no decoration takes place. +func WithAuditAnnotations(handler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { + // no need to wrap if auditing is disabled + if sink == nil || policy == nil { + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req = req.WithContext(audit.WithAuditAnnotations(req.Context())) + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go new file mode 100644 index 000000000..e88e7ad28 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/klog/v2" +) + +// WithAuthentication creates an http handler that tries to authenticate the given request as a user, and then +// stores any such user found onto the provided context for the request. If authentication fails or returns an error +// the failed handler is used. On success, "Authorization" header is removed from the request and handler +// is invoked to serve the request. +func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler { + if auth == nil { + klog.Warningf("Authentication is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + authenticationStart := time.Now() + + if len(apiAuds) > 0 { + req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds)) + } + resp, ok, err := auth.AuthenticateRequest(req) + defer recordAuthMetrics(resp, ok, err, apiAuds, authenticationStart) + if err != nil || !ok { + if err != nil { + klog.Errorf("Unable to authenticate the request due to an error: %v", err) + } + failed.ServeHTTP(w, req) + return + } + + if !audiencesAreAcceptable(apiAuds, resp.Audiences) { + err = fmt.Errorf("unable to match the audience: %v , accepted: %v", resp.Audiences, apiAuds) + klog.Error(err) + failed.ServeHTTP(w, req) + return + } + + // authorization header is not required anymore in case of a successful authentication. + req.Header.Del("Authorization") + + req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User)) + handler.ServeHTTP(w, req) + }) +} + +func Unauthorized(s runtime.NegotiatedSerializer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + requestInfo, found := genericapirequest.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + + gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + responsewriters.ErrorNegotiated(apierrors.NewUnauthorized("Unauthorized"), s, gv, w, req) + }) +} + +func audiencesAreAcceptable(apiAuds, responseAudiences authenticator.Audiences) bool { + if len(apiAuds) == 0 || len(responseAudiences) == 0 { + return true + } + + return len(apiAuds.Intersect(responseAudiences)) > 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go new file mode 100644 index 000000000..09d7db8cc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go @@ -0,0 +1,86 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// WithFailedAuthenticationAudit decorates a failed http.Handler used in WithAuthentication handler. +// It is meant to log only failed authentication requests. +func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { + if sink == nil || policy == nil { + return failedHandler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) + responsewriters.InternalError(w, req, errors.New("failed to create audit event")) + return + } + if ev == nil { + failedHandler.ServeHTTP(w, req) + return + } + + ev.ResponseStatus = &metav1.Status{} + ev.ResponseStatus.Message = getAuthMethods(req) + ev.Stage = auditinternal.StageResponseStarted + + rw := decorateResponseWriter(w, ev, sink, omitStages) + failedHandler.ServeHTTP(rw, req) + }) +} + +func getAuthMethods(req *http.Request) string { + authMethods := []string{} + + if _, _, ok := req.BasicAuth(); ok { + authMethods = append(authMethods, "basic") + } + + auth := strings.TrimSpace(req.Header.Get("Authorization")) + parts := strings.Split(auth, " ") + if len(parts) > 1 && strings.ToLower(parts[0]) == "bearer" { + authMethods = append(authMethods, "bearer") + } + + token := strings.TrimSpace(req.URL.Query().Get("access_token")) + if len(token) > 0 { + authMethods = append(authMethods, "access_token") + } + + if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 { + authMethods = append(authMethods, "x509") + } + + if len(authMethods) > 0 { + return fmt.Sprintf("Authentication failed, attempted: %s", strings.Join(authMethods, ", ")) + } + return "Authentication failed, no credentials provided" +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go new file mode 100644 index 000000000..8d115ff09 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "context" + "errors" + "net/http" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +const ( + // Annotation key names set in advanced audit + decisionAnnotationKey = "authorization.k8s.io/decision" + reasonAnnotationKey = "authorization.k8s.io/reason" + + // Annotation values set in advanced audit + decisionAllow = "allow" + decisionForbid = "forbid" + reasonError = "internal error" +) + +// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise. +func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { + if a == nil { + klog.Warningf("Authorization is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + ae := request.AuditEventFrom(ctx) + + attributes, err := GetAuthorizerAttributes(ctx) + if err != nil { + responsewriters.InternalError(w, req, err) + return + } + authorized, reason, err := a.Authorize(ctx, attributes) + // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. + if authorized == authorizer.DecisionAllow { + audit.LogAnnotation(ae, decisionAnnotationKey, decisionAllow) + audit.LogAnnotation(ae, reasonAnnotationKey, reason) + handler.ServeHTTP(w, req) + return + } + if err != nil { + audit.LogAnnotation(ae, reasonAnnotationKey, reasonError) + responsewriters.InternalError(w, req, err) + return + } + + klog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) + audit.LogAnnotation(ae, decisionAnnotationKey, decisionForbid) + audit.LogAnnotation(ae, reasonAnnotationKey, reason) + responsewriters.Forbidden(ctx, attributes, w, req, reason, s) + }) +} + +func GetAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error) { + attribs := authorizer.AttributesRecord{} + + user, ok := request.UserFrom(ctx) + if ok { + attribs.User = user + } + + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + return nil, errors.New("no RequestInfo found in the context") + } + + // Start with common attributes that apply to resource and non-resource requests + attribs.ResourceRequest = requestInfo.IsResourceRequest + attribs.Path = requestInfo.Path + attribs.Verb = requestInfo.Verb + + attribs.APIGroup = requestInfo.APIGroup + attribs.APIVersion = requestInfo.APIVersion + attribs.Resource = requestInfo.Resource + attribs.Subresource = requestInfo.Subresource + attribs.Namespace = requestInfo.Namespace + attribs.Name = requestInfo.Name + + return &attribs, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go new file mode 100644 index 000000000..e19f9d055 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" +) + +// WithCacheControl sets the Cache-Control header to "no-cache, private" because all servers are protected by authn/authz. +// see https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#defining_optimal_cache-control_policy +func WithCacheControl(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Set the cache-control header if it is not already set + if _, ok := w.Header()["Cache-Control"]; !ok { + w.Header().Set("Cache-Control", "no-cache, private") + } + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go new file mode 100644 index 000000000..a13125a25 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filters contains all the http handler chain filters which +// _are_ api related, i.e. which are prerequisite for the API services +// to work (in contrast to the filters in the server package which are +// not part of the API contract). +package filters // import "k8s.io/apiserver/pkg/endpoints/filters" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go new file mode 100644 index 000000000..1246ae863 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -0,0 +1,239 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "k8s.io/klog/v2" + + authenticationv1 "k8s.io/api/authentication/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" +) + +// WithImpersonation is a filter that will inspect and check requests that attempt to change the user.Info for their requests +func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + impersonationRequests, err := buildImpersonationRequests(req.Header) + if err != nil { + klog.V(4).Infof("%v", err) + responsewriters.InternalError(w, req, err) + return + } + if len(impersonationRequests) == 0 { + handler.ServeHTTP(w, req) + return + } + + ctx := req.Context() + requestor, exists := request.UserFrom(ctx) + if !exists { + responsewriters.InternalError(w, req, errors.New("no user found for request")) + return + } + + // if groups are not specified, then we need to look them up differently depending on the type of user + // if they are specified, then they are the authority (including the inclusion of system:authenticated/system:unauthenticated groups) + groupsSpecified := len(req.Header[authenticationv1.ImpersonateGroupHeader]) > 0 + + // make sure we're allowed to impersonate each thing we're requesting. While we're iterating through, start building username + // and group information + username := "" + groups := []string{} + userExtra := map[string][]string{} + for _, impersonationRequest := range impersonationRequests { + gvk := impersonationRequest.GetObjectKind().GroupVersionKind() + actingAsAttributes := &authorizer.AttributesRecord{ + User: requestor, + Verb: "impersonate", + APIGroup: gvk.Group, + APIVersion: gvk.Version, + Namespace: impersonationRequest.Namespace, + Name: impersonationRequest.Name, + ResourceRequest: true, + } + + switch gvk.GroupKind() { + case v1.SchemeGroupVersion.WithKind("ServiceAccount").GroupKind(): + actingAsAttributes.Resource = "serviceaccounts" + username = serviceaccount.MakeUsername(impersonationRequest.Namespace, impersonationRequest.Name) + if !groupsSpecified { + // if groups aren't specified for a service account, we know the groups because its a fixed mapping. 
Add them + groups = serviceaccount.MakeGroupNames(impersonationRequest.Namespace) + } + + case v1.SchemeGroupVersion.WithKind("User").GroupKind(): + actingAsAttributes.Resource = "users" + username = impersonationRequest.Name + + case v1.SchemeGroupVersion.WithKind("Group").GroupKind(): + actingAsAttributes.Resource = "groups" + groups = append(groups, impersonationRequest.Name) + + case authenticationv1.SchemeGroupVersion.WithKind("UserExtra").GroupKind(): + extraKey := impersonationRequest.FieldPath + extraValue := impersonationRequest.Name + actingAsAttributes.Resource = "userextras" + actingAsAttributes.Subresource = extraKey + userExtra[extraKey] = append(userExtra[extraKey], extraValue) + + default: + klog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) + return + } + + decision, reason, err := a.Authorize(ctx, actingAsAttributes) + if err != nil || decision != authorizer.DecisionAllow { + klog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) + return + } + } + + if username != user.Anonymous { + // When impersonating a non-anonymous user, include the 'system:authenticated' group + // in the impersonated user info: + // - if no groups were specified + // - if a group has been specified other than 'system:authenticated' + // + // If 'system:unauthenticated' group has been specified we should not include + // the 'system:authenticated' group. + addAuthenticated := true + for _, group := range groups { + if group == user.AllAuthenticated || group == user.AllUnauthenticated { + addAuthenticated = false + break + } + } + + if addAuthenticated { + groups = append(groups, user.AllAuthenticated) + } + } else { + addUnauthenticated := true + for _, group := range groups { + if group == user.AllUnauthenticated { + addUnauthenticated = false + break + } + } + + if addUnauthenticated { + groups = append(groups, user.AllUnauthenticated) + } + } + + newUser := &user.DefaultInfo{ + Name: username, + Groups: groups, + Extra: userExtra, + } + req = req.WithContext(request.WithUser(ctx, newUser)) + + oldUser, _ := request.UserFrom(ctx) + httplog.LogOf(req, w).Addf("%v is acting as %v", oldUser, newUser) + + ae := request.AuditEventFrom(ctx) + audit.LogImpersonatedUser(ae, newUser) + + // clear all the impersonation headers from the request + req.Header.Del(authenticationv1.ImpersonateUserHeader) + req.Header.Del(authenticationv1.ImpersonateGroupHeader) + for headerName := range req.Header { + if strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) { + req.Header.Del(headerName) + } + } + + handler.ServeHTTP(w, req) + }) +} + +func unescapeExtraKey(encodedKey string) string { + key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. + if err != nil { + return encodedKey // Always record extra strings, even if malformed/unencoded. + } + return key +} + +// buildImpersonationRequests returns a list of objectreferences that represent the different things we're requesting to impersonate. +// Also includes a map[string][]string representing user.Info.Extra +// Each request must be authorized against the current user before switching contexts. 
+func buildImpersonationRequests(headers http.Header) ([]v1.ObjectReference, error) { + impersonationRequests := []v1.ObjectReference{} + + requestedUser := headers.Get(authenticationv1.ImpersonateUserHeader) + hasUser := len(requestedUser) > 0 + if hasUser { + if namespace, name, err := serviceaccount.SplitUsername(requestedUser); err == nil { + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "ServiceAccount", Namespace: namespace, Name: name}) + } else { + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "User", Name: requestedUser}) + } + } + + hasGroups := false + for _, group := range headers[authenticationv1.ImpersonateGroupHeader] { + hasGroups = true + impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "Group", Name: group}) + } + + hasUserExtra := false + for headerName, values := range headers { + if !strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) { + continue + } + + hasUserExtra = true + extraKey := unescapeExtraKey(strings.ToLower(headerName[len(authenticationv1.ImpersonateUserExtraHeaderPrefix):])) + + // make a separate request for each extra value they're trying to set + for _, value := range values { + impersonationRequests = append(impersonationRequests, + v1.ObjectReference{ + Kind: "UserExtra", + // we only parse out a group above, but the parsing will fail if there isn't SOME version + // using the internal version will help us fail if anyone starts using it + APIVersion: authenticationv1.SchemeGroupVersion.String(), + Name: value, + // ObjectReference doesn't have a subresource field. FieldPath is close and available, so we'll use that + // TODO fight the good fight for ObjectReference to refer to resources and subresources + FieldPath: extraKey, + }) + } + } + + if (hasGroups || hasUserExtra) && !hasUser { + return nil, fmt.Errorf("requested %v without impersonating a user", impersonationRequests) + } + + return impersonationRequests, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go new file mode 100644 index 000000000..421c0e0a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go @@ -0,0 +1,115 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "strings" + "time" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +const ( + successLabel = "success" + failureLabel = "failure" + errorLabel = "error" +) + +var ( + authenticatedUserCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "authenticated_user_requests", + Help: "Counter of authenticated requests broken out by username.", + StabilityLevel: metrics.ALPHA, + }, + []string{"username"}, + ) + + authenticatedAttemptsCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "authentication_attempts", + Help: "Counter of authenticated attempts.", + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) + + authenticationLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "authentication_duration_seconds", + Help: "Authentication duration in seconds broken out by result.", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) +) + +func init() { + legacyregistry.MustRegister(authenticatedUserCounter) + legacyregistry.MustRegister(authenticatedAttemptsCounter) + legacyregistry.MustRegister(authenticationLatency) +} + +func recordAuthMetrics(resp *authenticator.Response, ok bool, err error, apiAudiences authenticator.Audiences, authStart time.Time) { + var resultLabel string + + switch { + case err != nil || (resp != nil && !audiencesAreAcceptable(apiAudiences, resp.Audiences)): + resultLabel = errorLabel + case !ok: + resultLabel = failureLabel + default: + resultLabel = successLabel + authenticatedUserCounter.WithLabelValues(compressUsername(resp.User.GetName())).Inc() + } + + authenticatedAttemptsCounter.WithLabelValues(resultLabel).Inc() + authenticationLatency.WithLabelValues(resultLabel).Observe(time.Since(authStart).Seconds()) +} + +// compressUsername maps all possible usernames onto a small set of categories +// of usernames. This is done both to limit the cardinality of the +// authorized_user_requests metric, and to avoid pushing actual usernames in the +// metric. +func compressUsername(username string) string { + switch { + // Known internal identities. + case username == "admin" || + username == "client" || + username == "kube_proxy" || + username == "kubelet" || + username == "system:serviceaccount:kube-system:default": + return username + // Probably an email address. + case strings.Contains(username, "@"): + return "email_id" + // Anything else (custom service accounts, custom external identities, etc.) + default: + return "other" + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go new file mode 100644 index 000000000..d9e7369f6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithRequestReceivedTimestamp attaches the ReceivedTimestamp (the time the request reached +// the apiserver) to the context. +func WithRequestReceivedTimestamp(handler http.Handler) http.Handler { + return withRequestReceivedTimestampWithClock(handler, utilclock.RealClock{}) +} + +// The clock is passed as a parameter, handy for unit testing. +func withRequestReceivedTimestampWithClock(handler http.Handler, clock utilclock.PassiveClock) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + req = req.WithContext(request.WithReceivedTimestamp(ctx, clock.Now())) + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go new file mode 100644 index 000000000..9cc524d4e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithRequestInfo attaches a RequestInfo to the context. +func WithRequestInfo(handler http.Handler, resolver request.RequestInfoResolver) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + info, err := resolver.NewRequestInfo(req) + if err != nil { + responsewriters.InternalError(w, req, fmt.Errorf("failed to create RequestInfo: %v", err)) + return + } + + req = req.WithContext(request.WithRequestInfo(ctx, info)) + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go new file mode 100644 index 000000000..414fc194e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go @@ -0,0 +1,121 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storageversion" + _ "k8s.io/component-base/metrics/prometheus/workqueue" // for workqueue metric registration + "k8s.io/klog/v2" +) + +// WithStorageVersionPrecondition checks if the storage version barrier has +// completed, if not, it only passes the following API requests: +// 1. non-resource requests, +// 2. read requests, +// 3. write requests to the storageversion API, +// 4. create requests to the namespace API sent by apiserver itself, +// 5. write requests to the lease API in kube-system namespace, +// 6. resources whose StorageVersion is not pending update, including non-persisted resources. +func WithStorageVersionPrecondition(handler http.Handler, svm storageversion.Manager, s runtime.NegotiatedSerializer) http.Handler { + if svm == nil { + // TODO(roycaihw): switch to warning after the feature graduate to beta/GA + klog.V(2).Infof("Storage Version barrier is disabled") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if svm.Completed() { + handler.ServeHTTP(w, req) + return + } + ctx := req.Context() + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + // Allow non-resource requests + if !requestInfo.IsResourceRequest { + handler.ServeHTTP(w, req) + return + } + // Allow read requests + if requestInfo.Verb == "get" || requestInfo.Verb == "list" || requestInfo.Verb == "watch" { + handler.ServeHTTP(w, req) + return + } + // Allow writes to the storage version API + if requestInfo.APIGroup == "internal.apiserver.k8s.io" && requestInfo.Resource == "storageversions" { + handler.ServeHTTP(w, req) + return + } + // The system namespace is required for apiserver-identity lease to exist. Allow the apiserver + // itself to create namespaces. + // NOTE: with this exception, if the bootstrap client writes namespaces with a new version, + // and the upgraded apiserver dies before updating the StorageVersion for namespaces, the + // storage migrator won't be able to tell these namespaces are stored in a different version in etcd. + // Because the bootstrap client only creates system namespace and doesn't update them, this can + // only happen if the upgraded apiserver is the first apiserver that kicks off namespace creation, + // or if an upgraded server that joins an existing cluster has new system namespaces (other + // than kube-system, kube-public, kube-node-lease) that need to be created. 
+ u, hasUser := request.UserFrom(ctx) + if requestInfo.APIGroup == "" && requestInfo.Resource == "namespaces" && + requestInfo.Verb == "create" && hasUser && + u.GetName() == user.APIServerUser && contains(u.GetGroups(), user.SystemPrivilegedGroup) { + handler.ServeHTTP(w, req) + return + } + // Allow writes to the lease API in kube-system. The storage version API depends on the + // apiserver-identity leases to operate. Leases in kube-system are either apiserver-identity + // lease (which gets garbage collected when stale) or leader-election leases (which gets + // periodically updated by system components). Both types of leases won't be stale in etcd. + if requestInfo.APIGroup == "coordination.k8s.io" && requestInfo.Resource == "leases" && + requestInfo.Namespace == metav1.NamespaceSystem { + handler.ServeHTTP(w, req) + return + } + // If the resource's StorageVersion is not in the to-be-updated list, let it pass. + // Non-persisted resources are not in the to-be-updated list, so they will pass. + gr := schema.GroupResource{requestInfo.APIGroup, requestInfo.Resource} + if !svm.PendingUpdate(gr) { + handler.ServeHTTP(w, req) + return + } + + gv := schema.GroupVersion{requestInfo.APIGroup, requestInfo.APIVersion} + responsewriters.ErrorNegotiated(apierrors.NewServiceUnavailable(fmt.Sprintf("wait for storage version registration to complete for resource: %v, last seen error: %v", gr, svm.LastUpdateError(gr))), s, gv, w, req) + }) +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go new file mode 100644 index 000000000..55e85f0b7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + "sync" + "unicode/utf8" + + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/warning" +) + +// WithWarningRecorder attaches a deduplicating k8s.io/apiserver/pkg/warning#WarningRecorder to the request context. 
+func WithWarningRecorder(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + recorder := &recorder{writer: w} + req = req.WithContext(warning.WithWarningRecorder(req.Context(), recorder)) + handler.ServeHTTP(w, req) + }) +} + +var ( + truncateAtTotalRunes = 4 * 1024 + truncateItemRunes = 256 +) + +type recordedWarning struct { + agent string + text string +} + +type recorder struct { + // lock guards calls to AddWarning from multiple threads + lock sync.Mutex + + // recorded tracks whether AddWarning was already called with a given text + recorded map[string]bool + + // ordered tracks warnings added so they can be replayed and truncated if needed + ordered []recordedWarning + + // written tracks how many runes of text have been added as warning headers + written int + + // truncating tracks if we have already exceeded truncateAtTotalRunes and are now truncating warning messages as we add them + truncating bool + + // writer is the response writer to add warning headers to + writer http.ResponseWriter +} + +func (r *recorder) AddWarning(agent, text string) { + if len(text) == 0 { + return + } + + r.lock.Lock() + defer r.lock.Unlock() + + // if we've already exceeded our limit and are already truncating, return early + if r.written >= truncateAtTotalRunes && r.truncating { + return + } + + // init if needed + if r.recorded == nil { + r.recorded = map[string]bool{} + } + + // dedupe if already warned + if r.recorded[text] { + return + } + r.recorded[text] = true + r.ordered = append(r.ordered, recordedWarning{agent: agent, text: text}) + + // truncate on a rune boundary, if needed + textRuneLength := utf8.RuneCountInString(text) + if r.truncating && textRuneLength > truncateItemRunes { + text = string([]rune(text)[:truncateItemRunes]) + textRuneLength = truncateItemRunes + } + + // compute the header + header, err := net.NewWarningHeader(299, agent, text) + if err != nil { + return + } + + // if this fits within our limit, or we're already truncating, write and return + if r.written+textRuneLength <= truncateAtTotalRunes || r.truncating { + r.written += textRuneLength + r.writer.Header().Add("Warning", header) + return + } + + // otherwise, enable truncation, reset, and replay the existing items as truncated warnings + r.truncating = true + r.written = 0 + r.writer.Header().Del("Warning") + utilruntime.HandleError(fmt.Errorf("exceeded max warning header size, truncating")) + for _, w := range r.ordered { + agent := w.agent + text := w.text + + textRuneLength := utf8.RuneCountInString(text) + if textRuneLength > truncateItemRunes { + text = string([]rune(text)[:truncateItemRunes]) + textRuneLength = truncateItemRunes + } + if header, err := net.NewWarningHeader(299, agent, text); err == nil { + r.written += textRuneLength + r.writer.Header().Add("Warning", header) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go new file mode 100644 index 000000000..22b973661 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -0,0 +1,137 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "path" + "time" + + restful "github.com/emicklei/go-restful" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storageversion" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" +) + +// APIGroupVersion is a helper for exposing rest.Storage objects as http.Handlers via go-restful +// It handles URLs of the form: +// /${storage_key}[/${object_name}] +// Where 'storage_key' points to a rest.Storage object stored in storage. +// This object should contain all parameterization necessary for running a particular API version +type APIGroupVersion struct { + Storage map[string]rest.Storage + + Root string + + // GroupVersion is the external group version + GroupVersion schema.GroupVersion + + // OptionsExternalVersion controls the Kubernetes APIVersion used for common objects in the apiserver + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may + // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. If + // empty, defaults to GroupVersion. + OptionsExternalVersion *schema.GroupVersion + // MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode + // common API implementations like ListOptions. Future changes will allow this to vary by group + // version (for when the inevitable meta/v2 group emerges). + MetaGroupVersion *schema.GroupVersion + + // RootScopedKinds are the root scoped kinds for the primary GroupVersion + RootScopedKinds sets.String + + // Serializer is used to determine how to convert responses from API methods into bytes to send over + // the wire. + Serializer runtime.NegotiatedSerializer + ParameterCodec runtime.ParameterCodec + + Typer runtime.ObjectTyper + Creater runtime.ObjectCreater + Convertor runtime.ObjectConvertor + Defaulter runtime.ObjectDefaulter + Linker runtime.SelfLinker + UnsafeConvertor runtime.ObjectConvertor + TypeConverter fieldmanager.TypeConverter + + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // Authorizer determines whether a user is allowed to make a certain request. The Handler does a preliminary + // authorization check using the request URI but it may be necessary to make additional checks, such as in + // the create-on-update case + Authorizer authorizer.Authorizer + + Admit admission.Interface + + MinRequestTimeout time.Duration + + // OpenAPIModels exposes the OpenAPI models to each individual handler. + OpenAPIModels openapiproto.Models + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. 
+ MaxRequestBodyBytes int64 +} + +// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container. +// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end +// in a slash. +func (g *APIGroupVersion) InstallREST(container *restful.Container) ([]*storageversion.ResourceInfo, error) { + prefix := path.Join(g.Root, g.GroupVersion.Group, g.GroupVersion.Version) + installer := &APIInstaller{ + group: g, + prefix: prefix, + minRequestTimeout: g.MinRequestTimeout, + } + + apiResources, resourceInfos, ws, registrationErrors := installer.Install() + versionDiscoveryHandler := discovery.NewAPIVersionHandler(g.Serializer, g.GroupVersion, staticLister{apiResources}) + versionDiscoveryHandler.AddToWebService(ws) + container.Add(ws) + return removeNonPersistedResources(resourceInfos), utilerrors.NewAggregate(registrationErrors) +} + +func removeNonPersistedResources(infos []*storageversion.ResourceInfo) []*storageversion.ResourceInfo { + var filtered []*storageversion.ResourceInfo + for _, info := range infos { + // if EncodingVersion is empty, then the apiserver does not + // need to register this resource via the storage version API, + // thus we can remove it. + if info != nil && len(info.EncodingVersion) > 0 { + filtered = append(filtered, info) + } + } + return filtered +} + +// staticLister implements the APIResourceLister interface +type staticLister struct { + list []metav1.APIResource +} + +func (s staticLister) ListAPIResources() []metav1.APIResource { + return s.list +} + +var _ discovery.APIResourceLister = &staticLister{} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go new file mode 100644 index 000000000..631914f36 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -0,0 +1,247 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "bytes" + "context" + "fmt" + "net/http" + "strings" + "time" + "unicode" + "unicode/utf8" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +var namespaceGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + +func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Create", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + if includeName { + // name was required, return + scope.err(err, w, req) + return + } + + // otherwise attempt to look up the namespace + namespace, err = scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + gv := scope.Kind.GroupVersion() + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) + + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.CreateOptions{} + values := req.URL.Query() + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidateCreateOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "CreateOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + + defaultGVK := scope.Kind + original := r.New() + trace.Step("About to convert to expected version") + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(scope.Typer, err, original, gvk, body) + 
scope.err(err, w, req) + return + } + if gvk.GroupVersion() != gv { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + // On create, get name from new object if unset + if len(name) == 0 { + _, name, _ = scope.Namer.ObjectName(obj) + } + if len(namespace) == 0 && *gvk == namespaceGVK { + namespace = name + } + ctx = request.WithNamespace(ctx, namespace) + + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + + userInfo, _ := request.UserFrom(ctx) + + trace.Step("About to store object in database") + admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo) + requestFunc := func() (runtime.Object, error) { + return r.Create( + ctx, + name, + obj, + rest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope), + options, + ) + } + // Dedup owner references before updating managed fields + dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + if scope.FieldManager != nil { + liveObj, err := scope.Creater.New(scope.Kind) + if err != nil { + return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err) + } + obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) + } + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { + if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil { + return nil, err + } + } + // Dedup owner references again after mutating admission happens + dedupOwnerReferencesAndAddWarning(obj, req.Context(), true) + result, err := requestFunc() + // If the object wasn't committed to storage because it's serialized size was too large, + // it is safe to remove managedFields (which can be large) and try again. + if isTooLargeError(err) { + if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil { + accessor.SetManagedFields(nil) + result, err = requestFunc() + } + } + return result, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + code := http.StatusCreated + status, ok := result.(*metav1.Status) + if ok && err == nil && status.Code == 0 { + status.Code = int32(code) + } + + transformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result) + } +} + +// CreateNamedResource returns a function that will handle a resource creation with name. +func CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc { + return createHandler(r, scope, admission, true) +} + +// CreateResource returns a function that will handle a resource creation. 
+func CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc { + return createHandler(&namedCreaterAdapter{r}, scope, admission, false) +} + +type namedCreaterAdapter struct { + rest.Creater +} + +func (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + return c.Creater.Create(ctx, obj, createValidatingAdmission, options) +} + +// manager is assumed to be already a valid value, we need to make +// userAgent into a valid value too. +func managerOrUserAgent(manager, userAgent string) string { + if manager != "" { + return manager + } + return prefixFromUserAgent(userAgent) +} + +// prefixFromUserAgent takes the characters preceding the first /, quote +// unprintable character and then trim what's beyond the +// FieldManagerMaxLength limit. +func prefixFromUserAgent(u string) string { + m := strings.Split(u, "/")[0] + buf := bytes.NewBuffer(nil) + for _, r := range m { + // Ignore non-printable characters + if !unicode.IsPrint(r) { + continue + } + // Only append if we have room for it + if buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength { + break + } + buf.WriteRune(r) + } + return buf.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go new file mode 100644 index 000000000..892aaf4a0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -0,0 +1,290 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +// DeleteResource returns a function that will handle a resource deletion +// TODO admission here becomes solely validating admission +func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. 
+ trace := utiltrace.New("Delete", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.DeleteOptions{} + if allowsOptions { + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs) + if err != nil { + scope.err(err, w, req) + return + } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions + defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") + obj, _, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + trace.Step("Decoded delete options") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + trace.Step("Recorded the audit event") + } else { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + } + if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) + + trace.Step("About to delete object from database") + wasDeleted := true + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options) + wasDeleted = deleted + return obj, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object deleted from database") + + status := http.StatusOK + // Return http.StatusAccepted if the resource was not deleted immediately and + // user requested 
cascading deletion by setting OrphanDependents=false. + // Note: We want to do this always if resource was not deleted immediately, but + // that will break existing clients. + // Other cases where resource is not instantly deleted are: namespace deletion + // and pod graceful deletion. + if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { + status = http.StatusAccepted + } + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: int32(status), + Details: &metav1.StatusDetails{ + Name: name, + Kind: scope.Kind.Kind, + }, + } + } + + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +// DeleteCollection returns a function that will handle a collection deletion +func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Delete", utiltrace.Field{"url", req.URL.Path}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + listOptions := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + if errs := metainternalversionvalidation.ValidateListOptions(&listOptions); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. 
+ if listOptions.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) + } + if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + options := &metav1.DeleteOptions{} + if checkBody { + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions + defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions") + obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + } else { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + } + if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) + + admit = admission.WithAudit(admit, ae) + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions) + }) + if err != nil { + scope.err(err, w, req) + return + } + + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: http.StatusOK, + Details: &metav1.StatusDetails{ + Kind: scope.Kind.Kind, + }, + } + } + + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go new file mode 100644 index 000000000..d39833814 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package handlers contains HTTP handlers to implement the apiserver APIs. +package handlers // import "k8s.io/apiserver/pkg/endpoints/handlers" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS new file mode 100644 index 000000000..a7470137b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS @@ -0,0 +1,5 @@ +approvers: +- jennybuckley +- apelisse +reviewers: +- kwiesmueller diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go new file mode 100644 index 000000000..fc471e797 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/buildmanagerinfo.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + } +} + +// Update implements Manager. +func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
+func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return internal.BuildManagerIdentifier(&managerInfo) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go new file mode 100644 index 000000000..c3184e241 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. +func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. 
+func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) {
+	// Gather all entries from updates
+	updaters := []string{}
+	for manager, fields := range managed.Fields() {
+		if !fields.Applied() {
+			updaters = append(updaters, manager)
+		}
+	}
+	if len(updaters) <= f.maxUpdateManagers {
+		return managed, nil
+	}
+
+	// If we have more than the maximum, sort the update entries by time, oldest first.
+	sort.Slice(updaters, func(i, j int) bool {
+		iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0)
+		if iTime != nil {
+			iSeconds = iTime.Unix()
+		}
+		if jTime != nil {
+			jSeconds = jTime.Unix()
+		}
+		if iSeconds != jSeconds {
+			return iSeconds < jSeconds
+		}
+		return updaters[i] < updaters[j]
+	})
+
+	// Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap
+	versionToFirstManager := map[string]string{}
+	for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ {
+		manager := updaters[i]
+		vs := managed.Fields()[manager]
+		time := managed.Times()[manager]
+		version := string(vs.APIVersion())
+
+		// Create a new manager identifier for the versioned bucket entry.
+		// The version for this manager comes from the version of the update being merged into the bucket.
+		bucket, err := internal.BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
+			Manager: f.oldUpdatesManagerName,
+			Operation: metav1.ManagedFieldsOperationUpdate,
+			APIVersion: version,
+		})
+		if err != nil {
+			return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err)
+		}
+
+		// Merge the fieldsets if this is not the first time the version was seen.
+		// Otherwise just record the manager name in versionToFirstManager
+		if first, ok := versionToFirstManager[version]; ok {
+			// If the bucket doesn't exist yet, create one.
+			if _, ok := managed.Fields()[bucket]; !ok {
+				s := managed.Fields()[first]
+				delete(managed.Fields(), first)
+				managed.Fields()[bucket] = s
+			}
+
+			managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied())
+			delete(managed.Fields(), manager)
+			length--
+
+			// Use the time from the update being merged into the bucket, since it is more recent.
+ managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml new file mode 100644 index 000000000..a667e9834 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 
10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + 
namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0166 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0230 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0290 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0291 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0292 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0293 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0294 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0295 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0296 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0297 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0298 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0299 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0300 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0301 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0302 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0303 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0304 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0305 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0306 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0307 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0308 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0309 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0310 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0311 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0312 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0313 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0314 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0315 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0316 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0317 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0318 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0319 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0320 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0321 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0322 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0323 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0324 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0325 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0326 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0327 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0328 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0329 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0330 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0331 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0332 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0333 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0334 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0335 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0336 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0337 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0338 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0339 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0340 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0341 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0342 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0343 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0344 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0345 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0346 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0347 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0348 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0349 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0350 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0351 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0352 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0353 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0354 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0355 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0356 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0357 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0358 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0359 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0360 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0361 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0362 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0363 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0364 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0365 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0366 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0367 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0368 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0369 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0370 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0371 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0372 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0373 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0374 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0375 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0376 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0377 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0378 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0379 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0380 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0381 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0382 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0383 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0384 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0385 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0386 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0387 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0388 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0389 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0390 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0391 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0392 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0393 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0394 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0395 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0396 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0397 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0398 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0399 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0400 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0401 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0402 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0403 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0404 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0405 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0406 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0407 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0408 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0409 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0410 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0411 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0412 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0413 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0414 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0415 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0416 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0417 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0418 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0419 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0420 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0421 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0422 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0423 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0424 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0425 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0426 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0427 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0428 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0429 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0430 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0431 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0432 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0433 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0434 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0435 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0436 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0437 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0438 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0439 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0440 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0441 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0442 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0443 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0444 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0445 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0446 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0447 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0448 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0449 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0450 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0451 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0452 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0453 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0454 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0455 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0456 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0457 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0458 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0459 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0460 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0461 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0462 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0463 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0464 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0465 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0466 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0467 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0468 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0469 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0470 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0471 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0472 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0473 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0474 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0475 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0476 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0477 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0478 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0479 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0480 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0481 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0482 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0483 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0484 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0485 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0486 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0487 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0488 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0489 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0490 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0491 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0492 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0493 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0494 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0495 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0496 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0497 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0498 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0499 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0500 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0501 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0502 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0503 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0504 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0505 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0506 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0507 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0508 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0509 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0510 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0511 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0512 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0513 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0514 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0515 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0516 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0517 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0518 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0519 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0520 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0521 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0522 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0523 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0524 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0525 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0526 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0527 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0528 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0529 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0530 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0531 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0532 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0533 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0534 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0535 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0536 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0537 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0538 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0539 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0540 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0541 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0542 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0543 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0544 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0545 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0546 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0547 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0548 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0549 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0550 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0551 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0552 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0553 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0554 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0555 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0556 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0557 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0558 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0559 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0560 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0561 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0562 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0563 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0564 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0565 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0566 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0567 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0568 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0569 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0570 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0571 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0572
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
[... the added addresses list continues with identical entries for pod-name-1234-0573 through pod-name-1234-0976, each with ip 10.0.0.1, namespace default, resourceVersion '1234567890', and uid 11111111-2222-3333-4444-555555555555 ...]
+  - ip: 10.0.0.1
+    targetRef:
kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go new file mode 100644 index 000000000..7e81fcc89 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -0,0 +1,243 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = internal.NewAtMostEvery(time.Second) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} + +// FieldManager updates the managed fields and merge applied +// configurations. 
+type FieldManager struct { + fieldManager Manager + ignoreManagedFieldsFromRequestObject bool +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. +func NewFieldManager(f Manager, ignoreManagedFieldsFromRequestObject bool) *FieldManager { + return &FieldManager{fieldManager: f, ignoreManagedFieldsFromRequestObject: ignoreManagedFieldsFromRequestObject} +} + +// NewDefaultFieldManager creates a new FieldManager that merges apply requests +// and update managed fields for other types of requests. +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, ignoreManagedFieldsFromRequestObject bool) (*FieldManager, error) { + f, err := NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, ignoreManagedFieldsFromRequestObject), nil +} + +// NewDefaultCRDFieldManager creates a new FieldManager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, ignoreManagedFieldsFromRequestObject bool) (_ *FieldManager, err error) { + f, err := NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return newDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, ignoreManagedFieldsFromRequestObject), nil +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func newDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, ignoreManagedFieldsFromRequestObject bool) *FieldManager { + f = NewStripMetaManager(f) + f = NewManagedFieldsUpdater(f) + f = NewBuildManagerInfoManager(f, kind.GroupVersion()) + f = NewCapManagersManager(f, DefaultMaxUpdateManagers) + f = NewProbabilisticSkipNonAppliedManager(f, objectCreater, kind, DefaultTrackOnCreateProbability) + f = NewLastAppliedManager(f, typeConverter, objectConverter, kind.GroupVersion()) + f = NewLastAppliedUpdater(f) + + return NewFieldManager(f, ignoreManagedFieldsFromRequestObject) +} + +func decodeLiveManagedFields(liveObj runtime.Object) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + managed, err := internal.DecodeObjectManagedFields(liveAccessor.GetManagedFields()) + if err != nil { + return internal.NewEmptyManaged(), nil + } + return managed, nil +} + +func decodeManagedFields(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. 
+ if ignoreManagedFieldsFromRequestObject { + return decodeLiveManagedFields(liveObj) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return internal.NewEmptyManaged(), nil + } + + managed, err := internal.DecodeObjectManagedFields(newAccessor.GetManagedFields()) + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + if err != nil || len(managed.Fields()) == 0 { + return decodeLiveManagedFields(liveObj) + } + + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + managed, err := decodeManagedFields(liveObj, newObj, f.ignoreManagedFieldsFromRequestObject) + if err != nil { + return newObj, nil + } + + internal.RemoveObjectManagedFields(liveObj) + internal.RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. +func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + klog.Errorf("[SHOULD NOT HAPPEN] failed to update managedFields for %v: %v", + newObj.GetObjectKind().GroupVersionKind(), + err) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + internal.RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. 
+ managed, err := internal.DecodeObjectManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + internal.RemoveObjectManagedFields(liveObj) + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, internal.NewConflictError(conflicts) + } + return nil, err + } + + if err = internal.EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go new file mode 100644 index 000000000..b75ef7416 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + "time" +) + +// AtMostEvery will never run the method more than once every specified +// duration. +type AtMostEvery struct { + delay time.Duration + lastCall time.Time + mutex sync.Mutex +} + +// NewAtMostEvery creates a new AtMostEvery, that will run the method at +// most every given duration. +func NewAtMostEvery(delay time.Duration) *AtMostEvery { + return &AtMostEvery{ + delay: delay, + } +} + +// updateLastCall returns true if the lastCall time has been updated, +// false if it was too early. +func (s *AtMostEvery) updateLastCall() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if time.Since(s.lastCall) < s.delay { + return false + } + s.lastCall = time.Now() + return true +} + +// Do will run the method if enough time has passed, and return true. +// Otherwise, it does nothing and returns false. +func (s *AtMostEvery) Do(fn func()) bool { + if !s.updateLastCall() { + return false + } + fn() + return true +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go new file mode 100644 index 000000000..cfa19d8d9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/conflict.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%q using %v", encodedManager.Manager, encodedManager.APIVersion) + } + return fmt.Sprintf("%q using %v at %v", encodedManager.Manager, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return fmt.Sprintf("%q", encodedManager.Manager) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go new file mode 100644 index 000000000..08186191a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go new file mode 100644 index 000000000..f8abcaf04 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// GvkParser contains a Parser that allows introspecting the schema. +type GvkParser struct { + gvks map[schema.GroupVersionKind]string + parser typed.Parser +} + +// Type returns a helper which can produce objects of the given type. Any +// errors are deferred until a further function is called. +func (p *GvkParser) Type(gvk schema.GroupVersionKind) *typed.ParseableType { + typeName, ok := p.gvks[gvk] + if !ok { + return nil + } + t := p.parser.Type(typeName) + return &t +} + +// NewGVKParser builds a GVKParser from a proto.Models. This +// will automatically find the proper version of the object, and the +// corresponding schema information. 
+func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser, error) { + typeSchema, err := schemaconv.ToSchemaWithPreserveUnknownFields(models, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + parser := GvkParser{ + gvks: map[schema.GroupVersionKind]string{}, + } + parser.parser = typed.Parser{Schema: *typeSchema} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName)) + } + gvkList := parseGroupVersionKind(model) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + _, ok := parser.gvks[gvk] + if ok { + return nil, fmt.Errorf("duplicate entry for %v", gvk) + } + parser.gvks[gvk] = modelName + } + } + } + return &parser, nil +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go new file mode 100644 index 000000000..9a625e2ac --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go @@ -0,0 +1,244 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. 
+ Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. +func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// DecodeObjectManagedFields extracts and converts the objects ManagedFields into a fieldpath.ManagedFields. +func DecodeObjectManagedFields(from []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed, err := decodeManagedFields(from) + if err != nil { + return nil, fmt.Errorf("failed to convert managed fields from API: %v", err) + } + return &managed, nil +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// decodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed managedStruct, err error) { + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. 
+ case "": + return managedStruct{}, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return managedStruct{}, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return managedStruct{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return managedStruct{}, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. + if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != 
qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + return p.APIVersion < q.APIVersion + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go new file mode 100644 index 000000000..1954d65d3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go new file mode 100644 index 000000000..4b07d462a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedmanager.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "encoding/json" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[corev1.LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go new file mode 100644 index 000000000..91e2e9691 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go @@ -0,0 +1,117 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = setLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + _, ok := annotations[corev1.LastAppliedConfigAnnotation] + return ok +} + +func setLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[corev1.LastAppliedConfigAnnotation] = value + accessor.SetAnnotations(annotations) + return nil +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, corev1.LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go new file mode 100644 index 000000000..9bee82a85 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. 
+func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + formerSet := managed.Fields()[manager] + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + // Update the time only if the manager's fieldSet has changed. + if formerSet == nil || !managed.Fields()[manager].Set().Equals(formerSet.Set()) { + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + formerManaged := managed.Fields().Copy() + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil || !managed.Fields().Equals(formerManaged) { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } + if object == nil { + object = liveObj + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml new file mode 100644 index 000000000..13a14cf44 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml @@ -0,0 +1,259 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + 
lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. 
AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - k8s.gcr.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - k8s.gcr.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - k8s.gcr.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - k8s.gcr.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - k8s.gcr.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - k8s.gcr.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - k8s.gcr.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - k8s.gcr.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - k8s.gcr.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - k8s.gcr.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - k8s.gcr.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - k8s.gcr.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - k8s.gcr.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - k8s.gcr.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - k8s.gcr.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - k8s.gcr.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - k8s.gcr.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - k8s.gcr.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - k8s.gcr.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - k8s.gcr.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - k8s.gcr.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + 
- k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - k8s.gcr.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - k8s.gcr.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - k8s.gcr.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - k8s.gcr.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - k8s.gcr.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - k8s.gcr.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml new file mode 100644 index 000000000..3fb0877d6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + 
- args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go new file mode 100644 index 000000000..a8c34ad65 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/skipnonapplied.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldmanager + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + gvk schema.GroupVersionKind + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + gvk: gvk, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + emptyObj, err := f.objectCreater.New(f.gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go new file mode 100644 index 000000000..1460d9c80 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go new file mode 100644 index 000000000..216a39cf7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (Manager, error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + }, + }, nil +} + +// Update implements Manager. 
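// The steps in the body below are: convert both the live and the new object
// to the manager's request version, turn them into structured-merge-diff
// typed values via the TypeConverter, and let merge.Updater.Update recompute
// the managedFields; the new object itself is passed through unchanged.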
+func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields: %v", err) + } + managed = internal.NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("metadata.managedFields must be nil")) + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object: %v", err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object: %v", err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = internal.NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object to object: %v", err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned: %v", err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go new file mode 100644 index 000000000..8bb2f0c9e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. 
+type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +// +// Note that this is not going to be sufficient for converting to/from +// CRDs that have a schema defined (we don't support that schema yet). +// TODO(jennybuckley): Use the schema provided by a CRD if it exists. +type DeducedTypeConverter struct{} + +var _ TypeConverter = DeducedTypeConverter{} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. +func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type typeConverter struct { + parser *internal.GvkParser +} + +var _ TypeConverter = &typeConverter{} + +// NewTypeConverter builds a TypeConverter from a proto.Models. This +// will automatically find the proper version of the object, and the +// corresponding schema information. +func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) { + parser, err := internal.NewGVKParser(models, preserveUnknownFields) + if err != nil { + return nil, err + } + return &typeConverter{parser: parser}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser.Type(gvk) + if t == nil { + return nil, newNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func newNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go new file mode 100644 index 000000000..477e92f79 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go @@ -0,0 +1,101 @@ 
+/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. + fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. 
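	// (For example: converting a typed value held at "apps/v1beta1" to the
	// APIVersion "apps/v1" takes two hops, first to the hub version chosen by
	// hubGetter, then to the requested group version; the version strings here
	// are illustrative.)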
+ return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go new file mode 100644 index 000000000..c3f6e4cbe --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -0,0 +1,287 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "math/rand" + "net/http" + "net/url" + "strings" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/utils/trace" +) + +// getterFunc performs a get request with the given context and object name. The request +// may be used to deserialize an options object to pass to the getter. +type getterFunc func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) + +// getResourceHandler is an HTTP handler function for get requests. It delegates to the +// passed-in getterFunc to perform the actual get. +func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Get", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + result, err := getter(ctx, name, req, trace) + if err != nil { + scope.err(err, w, req) + return + } + + trace.Step("About to write a response") + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + trace.Step("Transformed response object") + } +} + +// GetResource returns a function that handles retrieving a single resource from a rest.Storage object. 
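//
// For example, a request of the form
//
//	GET /api/v1/namespaces/default/pods/foo?export=true
//
// is decoded into metav1.ExportOptions and answered by the rest.Exporter when
// one is provided; with a nil exporter the handler below returns a
// 400 BadRequest. Ordinary gets decode the query into metav1.GetOptions
// instead.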
+func GetResource(r rest.Getter, e rest.Exporter, scope *RequestScope) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + // check for export + options := metav1.GetOptions{} + if values := req.URL.Query(); len(values) > 0 { + exports := metav1.ExportOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if exports.Export { + if e == nil { + return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource)) + } + return e.Export(ctx, name, exports) + } + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, &options) + }) +} + +// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object. +func GetResourceWithOptions(r rest.GetterWithOptions, scope *RequestScope, isSubresource bool) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx context.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + opts, subpath, subpathKey := r.NewGetOptions() + trace.Step("About to process Get options") + if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, opts) + }) +} + +// getRequestOptions parses out options and can include path information. The path information shouldn't include the subresource. +func getRequestOptions(req *http.Request, scope *RequestScope, into runtime.Object, subpath bool, subpathKey string, isSubresource bool) error { + if into == nil { + return nil + } + + query := req.URL.Query() + if subpath { + newQuery := make(url.Values) + for k, v := range query { + newQuery[k] = v + } + + ctx := req.Context() + requestInfo, _ := request.RequestInfoFrom(ctx) + startingIndex := 2 + if isSubresource { + startingIndex = 3 + } + + p := strings.Join(requestInfo.Parts[startingIndex:], "/") + + // ensure non-empty subpaths correctly reflect a leading slash + if len(p) > 0 && !strings.HasPrefix(p, "/") { + p = "/" + p + } + + // ensure subpaths correctly reflect the presence of a trailing slash on the original request + if strings.HasSuffix(requestInfo.Path, "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + newQuery[subpathKey] = []string{p} + query = newQuery + } + return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into) +} + +func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("List", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + // Watches for single objects are routed to this function. 
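	// For example,
	//
	//	GET /api/v1/namespaces/default/pods/foo?watch=true
	//
	// arrives here with the name "foo"; the code below folds that name into a
	// "metadata.name=foo" field selector so storage can serve it as a
	// single-object watch.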
+ // Treat a name parameter the same as a field selector entry. + hasName := true + _, name, err := scope.Namer.Name(req) + if err != nil { + hasName = false + } + + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + opts := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + if errs := metainternalversionvalidation.ValidateListOptions(&opts); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. + if opts.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value) + } + if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + if hasName { + // metadata.name is the canonical internal name. + // SelectionPredicate will notice that this is a request for + // a single object and optimize the storage query accordingly. + nameSelector := fields.OneTermEqualSelector("metadata.name", name) + + // Note that fieldSelector setting explicitly the "metadata.name" + // will result in reaching this branch (as the value of that field + // is propagated to requestInfo as the name parameter. + // That said, the allowed field selectors in this branch are: + // nil, fields.Everything and field selector matching metadata.name + // for our name. + if opts.FieldSelector != nil && !opts.FieldSelector.Empty() { + selectedName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name") + if !ok || name != selectedName { + scope.err(errors.NewBadRequest("fieldSelector metadata.name doesn't match requested name"), w, req) + return + } + } else { + opts.FieldSelector = nameSelector + } + } + + if opts.Watch || forceWatch { + if rw == nil { + scope.err(errors.NewMethodNotSupported(scope.Resource.GroupResource(), "watch"), w, req) + return + } + // TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=. + timeout := time.Duration(0) + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + if timeout == 0 && minRequestTimeout > 0 { + timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) + } + klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + watcher, err := rw.Watch(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + serveWatch(watcher, scope, outputMediaType, req, w, timeout) + }) + return + } + + // Log only long List requests (ignore Watch). 
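	// ("Long" means longer than the 500ms threshold passed to trace.LogIfLong
	// below; the watch branch above is long-running by design, with an
	// effective timeout jittered between minRequestTimeout and
	// 2*minRequestTimeout when the client sets no timeoutSeconds.)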
+ defer trace.LogIfLong(500 * time.Millisecond) + trace.Step("About to List from storage") + result, err := r.List(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Listing from storage done") + + transformResponseObject(ctx, scope, trace, req, w, http.StatusOK, outputMediaType, result) + trace.Step("Writing http response done", utiltrace.Field{"count", meta.LenList(result)}) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go new file mode 100644 index 000000000..82170e050 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +const ( + maxUserAgentLength = 1024 + userAgentTruncateSuffix = "...TRUNCATED" +) + +// lazyTruncatedUserAgent implements String() string and it will +// return user-agent which may be truncated. +type lazyTruncatedUserAgent struct { + req *http.Request +} + +func (lazy *lazyTruncatedUserAgent) String() string { + ua := "unknown" + if lazy.req != nil { + ua = utilnet.GetHTTPClient(lazy.req) + if len(ua) > maxUserAgentLength { + ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix + } + } + return ua +} + +// LazyClientIP implements String() string and it will +// calls GetClientIP() lazily only when required. +type lazyClientIP struct { + req *http.Request +} + +func (lazy *lazyClientIP) String() string { + if lazy.req != nil { + if ip := utilnet.GetClientIP(lazy.req); ip != nil { + return ip.String() + } + } + return "unknown" +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go new file mode 100644 index 000000000..755da22ee --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// ScopeNamer handles accessing names from requests and objects +type ScopeNamer interface { + // Namespace returns the appropriate namespace value from the request (may be empty) or an + // error. 
+ Namespace(req *http.Request) (namespace string, err error) + // Name returns the name from the request, and an optional namespace value if this is a namespace + // scoped call. An error is returned if the name is not available. + Name(req *http.Request) (namespace, name string, err error) + // ObjectName returns the namespace and name from an object if they exist, or an error if the object + // does not support names. + ObjectName(obj runtime.Object) (namespace, name string, err error) + // SetSelfLink sets the provided URL onto the object. The method should return nil if the object + // does not support selfLinks. + SetSelfLink(obj runtime.Object, url string) error + // GenerateLink creates an encoded URI for a given runtime object that represents the canonical path + // and query. + GenerateLink(requestInfo *request.RequestInfo, obj runtime.Object) (uri string, err error) + // GenerateListLink creates an encoded URI for a list that represents the canonical path and query. + GenerateListLink(req *http.Request) (uri string, err error) +} + +type ContextBasedNaming struct { + SelfLinker runtime.SelfLinker + ClusterScoped bool + + SelfLinkPathPrefix string + SelfLinkPathSuffix string +} + +// ContextBasedNaming implements ScopeNamer +var _ ScopeNamer = ContextBasedNaming{} + +func (n ContextBasedNaming) SetSelfLink(obj runtime.Object, url string) error { + return n.SelfLinker.SetSelfLink(obj, url) +} + +func (n ContextBasedNaming) Namespace(req *http.Request) (namespace string, err error) { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if !ok { + return "", fmt.Errorf("missing requestInfo") + } + return requestInfo.Namespace, nil +} + +func (n ContextBasedNaming) Name(req *http.Request) (namespace, name string, err error) { + requestInfo, ok := request.RequestInfoFrom(req.Context()) + if !ok { + return "", "", fmt.Errorf("missing requestInfo") + } + ns, err := n.Namespace(req) + if err != nil { + return "", "", err + } + + if len(requestInfo.Name) == 0 { + return "", "", errEmptyName + } + return ns, requestInfo.Name, nil +} + +// fastURLPathEncode encodes the provided path as a URL path +func fastURLPathEncode(path string) string { + for _, r := range []byte(path) { + switch { + case r >= '-' && r <= '9', r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z': + // characters within this range do not require escaping + default: + var u url.URL + u.Path = path + return u.EscapedPath() + } + } + return path +} + +func (n ContextBasedNaming) GenerateLink(requestInfo *request.RequestInfo, obj runtime.Object) (uri string, err error) { + namespace, name, err := n.ObjectName(obj) + if err == errEmptyName && len(requestInfo.Name) > 0 { + name = requestInfo.Name + } else if err != nil { + return "", err + } + if len(namespace) == 0 && len(requestInfo.Namespace) > 0 { + namespace = requestInfo.Namespace + } + + if n.ClusterScoped { + return n.SelfLinkPathPrefix + url.QueryEscape(name) + n.SelfLinkPathSuffix, nil + } + + builder := strings.Builder{} + builder.Grow(len(n.SelfLinkPathPrefix) + len(namespace) + len(requestInfo.Resource) + len(name) + len(n.SelfLinkPathSuffix) + 8) + builder.WriteString(n.SelfLinkPathPrefix) + builder.WriteString(namespace) + builder.WriteByte('/') + builder.WriteString(requestInfo.Resource) + builder.WriteByte('/') + builder.WriteString(name) + builder.WriteString(n.SelfLinkPathSuffix) + return fastURLPathEncode(builder.String()), nil +} + +func (n ContextBasedNaming) GenerateListLink(req *http.Request) (uri string, err error) { + if len(req.URL.RawPath) > 0 { + 
return req.URL.RawPath, nil + } + return fastURLPathEncode(req.URL.Path), nil +} + +func (n ContextBasedNaming) ObjectName(obj runtime.Object) (namespace, name string, err error) { + name, err = n.SelfLinker.Name(obj) + if err != nil { + return "", "", err + } + if len(name) == 0 { + return "", "", errEmptyName + } + namespace, err = n.SelfLinker.Namespace(obj) + if err != nil { + return "", "", err + } + return namespace, name, err +} + +// errEmptyName is returned when API requests do not fill the name section of the path. +var errEmptyName = errors.NewBadRequest("name must be provided") diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go new file mode 100644 index 000000000..80f4feb72 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package negotiation contains media type negotiation logic. +package negotiation // import "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go new file mode 100644 index 000000000..86faf525d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package negotiation + +import ( + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// errNotAcceptable indicates Accept negotiation has failed +type errNotAcceptable struct { + accepted []string +} + +// NewNotAcceptableError returns an error of NotAcceptable which contains specified string +func NewNotAcceptableError(accepted []string) error { + return errNotAcceptable{accepted} +} + +func (e errNotAcceptable) Error() string { + return fmt.Sprintf("only the following media types are accepted: %v", strings.Join(e.accepted, ", ")) +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReasonNotAcceptable, + Message: e.Error(), + } +} + +// errNotAcceptableConversion indicates Accept negotiation has failed specifically +// for a conversion to a known type. 
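//
// For example (illustrative values only),
//
//	NewNotAcceptableConversionError("Table", []string{"application/json", "application/yaml"})
//
// yields a 406 NotAcceptable status whose message reads "only the following
// media types are accepted when converting to Table: application/json,
// application/yaml".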
+type errNotAcceptableConversion struct { + target string + accepted []string +} + +// NewNotAcceptableConversionError returns an error indicating that the desired +// API transformation to the target group version kind string is not accepted and +// only the listed mime types are allowed. This is temporary while Table does not +// yet support protobuf encoding. +func NewNotAcceptableConversionError(target string, accepted []string) error { + return errNotAcceptableConversion{target, accepted} +} + +func (e errNotAcceptableConversion) Error() string { + return fmt.Sprintf("only the following media types are accepted when converting to %s: %v", e.target, strings.Join(e.accepted, ", ")) +} + +func (e errNotAcceptableConversion) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReasonNotAcceptable, + Message: e.Error(), + } +} + +// errUnsupportedMediaType indicates Content-Type is not recognized +type errUnsupportedMediaType struct { + accepted []string +} + +// NewUnsupportedMediaTypeError returns an error of UnsupportedMediaType which contains specified string +func NewUnsupportedMediaTypeError(accepted []string) error { + return errUnsupportedMediaType{accepted} +} + +func (e errUnsupportedMediaType) Error() string { + return fmt.Sprintf("the body of the request was in an unknown format - accepted media types include: %v", strings.Join(e.accepted, ", ")) +} + +func (e errUnsupportedMediaType) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnsupportedMediaType, + Reason: metav1.StatusReasonUnsupportedMediaType, + Message: e.Error(), + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go new file mode 100644 index 000000000..9dbad1ea6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -0,0 +1,263 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package negotiation + +import ( + "mime" + "net/http" + "strconv" + "strings" + + "github.com/munnerz/goautoneg" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MediaTypesForSerializer returns a list of media and stream media types for the server. +func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, streamMediaTypes []string) { + for _, info := range ns.SupportedMediaTypes() { + mediaTypes = append(mediaTypes, info.MediaType) + if info.StreamSerializer != nil { + // stream=watch is the existing mime-type parameter for watch + streamMediaTypes = append(streamMediaTypes, info.MediaType+";stream=watch") + } + } + return mediaTypes, streamMediaTypes +} + +// NegotiateOutputMediaType negotiates the output structured media type and a serializer, or +// returns an error. 
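//
// For example, a client requesting the server-side Table transformation sends
// an Accept header along the lines of
//
//	Accept: application/json;as=Table;v=v1;g=meta.k8s.io
//
// which the parameter handling further down maps to a MediaTypeOptions whose
// Convert field is the GroupVersionKind {Group: meta.k8s.io, Version: v1,
// Kind: Table}, provided the EndpointRestrictions allow that transform.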
+func NegotiateOutputMediaType(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (MediaTypeOptions, runtime.SerializerInfo, error) { + mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions) + if !ok { + supported, _ := MediaTypesForSerializer(ns) + return mediaType, runtime.SerializerInfo{}, NewNotAcceptableError(supported) + } + // TODO: move into resthandler + info := mediaType.Accepted + if (mediaType.Pretty || isPrettyPrint(req)) && info.PrettySerializer != nil { + info.Serializer = info.PrettySerializer + } + return mediaType, info, nil +} + +// NegotiateOutputMediaTypeStream returns a stream serializer for the given request. +func NegotiateOutputMediaTypeStream(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (runtime.SerializerInfo, error) { + mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions) + if !ok || mediaType.Accepted.StreamSerializer == nil { + _, supported := MediaTypesForSerializer(ns) + return runtime.SerializerInfo{}, NewNotAcceptableError(supported) + } + return mediaType.Accepted, nil +} + +// NegotiateInputSerializer returns the input serializer for the provided request. +func NegotiateInputSerializer(req *http.Request, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { + mediaType := req.Header.Get("Content-Type") + return NegotiateInputSerializerForMediaType(mediaType, streaming, ns) +} + +// NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error. +func NegotiateInputSerializerForMediaType(mediaType string, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { + mediaTypes := ns.SupportedMediaTypes() + if len(mediaType) == 0 { + mediaType = mediaTypes[0].MediaType + } + if mediaType, _, err := mime.ParseMediaType(mediaType); err == nil { + if info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType); ok { + return info, nil + } + } + + supported, streamingSupported := MediaTypesForSerializer(ns) + if streaming { + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(streamingSupported) + } + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) +} + +// isPrettyPrint returns true if the "pretty" query parameter is true or if the User-Agent +// matches known "human" clients. +func isPrettyPrint(req *http.Request) bool { + // DEPRECATED: should be part of the content type + if req.URL != nil { + // avoid an allocation caused by parsing the URL query + if strings.Contains(req.URL.RawQuery, "pretty") { + pp := req.URL.Query().Get("pretty") + if len(pp) > 0 { + pretty, _ := strconv.ParseBool(pp) + return pretty + } + } + } + userAgent := req.UserAgent() + // This covers basic all browsers and cli http tools + if strings.HasPrefix(userAgent, "curl") || strings.HasPrefix(userAgent, "Wget") || strings.HasPrefix(userAgent, "Mozilla/5.0") { + return true + } + return false +} + +// EndpointRestrictions is an interface that allows content-type negotiation +// to verify server support for specific options +type EndpointRestrictions interface { + // AllowsMediaTypeTransform returns true if the endpoint allows either the requested mime type + // or the requested transformation. If false, the caller should ignore this mime type. If the + // target is nil, the client is not requesting a transformation. 
+ AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool + // AllowsServerVersion should return true if the specified version is valid + // for the server group. + AllowsServerVersion(version string) bool + // AllowsStreamSchema should return true if the specified stream schema is + // valid for the server group. + AllowsStreamSchema(schema string) bool +} + +// DefaultEndpointRestrictions is the default EndpointRestrictions which allows +// content-type negotiation to verify server support for specific options +var DefaultEndpointRestrictions = emptyEndpointRestrictions{} + +type emptyEndpointRestrictions struct{} + +func (emptyEndpointRestrictions) AllowsMediaTypeTransform(mimeType string, mimeSubType string, gvk *schema.GroupVersionKind) bool { + return gvk == nil +} +func (emptyEndpointRestrictions) AllowsServerVersion(string) bool { return false } +func (emptyEndpointRestrictions) AllowsStreamSchema(s string) bool { return s == "watch" } + +// MediaTypeOptions describes information for a given media type that may alter +// the server response +type MediaTypeOptions struct { + // pretty is true if the requested representation should be formatted for human + // viewing + Pretty bool + + // stream, if set, indicates that a streaming protocol variant of this encoding + // is desired. The only currently supported value is watch which returns versioned + // events. In the future, this may refer to other stream protocols. + Stream string + + // convert is a request to alter the type of object returned by the server from the + // normal response + Convert *schema.GroupVersionKind + // useServerVersion is an optional version for the server group + UseServerVersion string + + // export is true if the representation requested should exclude fields the server + // has set + Export bool + + // unrecognized is a list of all unrecognized keys + Unrecognized []string + + // the accepted media type from the client + Accepted runtime.SerializerInfo +} + +// acceptMediaTypeOptions returns an options object that matches the provided media type params. If +// it returns false, the provided options are not allowed and the media type must be skipped. These +// parameters are unversioned and may not be changed. +func acceptMediaTypeOptions(params map[string]string, accepts *runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { + var options MediaTypeOptions + + // extract all known parameters + for k, v := range params { + switch k { + + // controls transformation of the object when returned + case "as": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Kind = v + case "g": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Group = v + case "v": + if options.Convert == nil { + options.Convert = &schema.GroupVersionKind{} + } + options.Convert.Version = v + + // controls the streaming schema + case "stream": + if len(v) > 0 && (accepts.StreamSerializer == nil || !endpoint.AllowsStreamSchema(v)) { + return MediaTypeOptions{}, false + } + options.Stream = v + + // controls the version of the server API group used + // for generic output + case "sv": + if len(v) > 0 && !endpoint.AllowsServerVersion(v) { + return MediaTypeOptions{}, false + } + options.UseServerVersion = v + + // if specified, the server should transform the returned + // output and remove fields that are always server specified, + // or which fit the default behavior. 
+ case "export": + options.Export = v == "1" + + // if specified, the pretty serializer will be used + case "pretty": + options.Pretty = v == "1" + + default: + options.Unrecognized = append(options.Unrecognized, k) + } + } + + if !endpoint.AllowsMediaTypeTransform(accepts.MediaTypeType, accepts.MediaTypeSubType, options.Convert) { + return MediaTypeOptions{}, false + } + + options.Accepted = *accepts + return options, true +} + +// NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and +// a list of alternatives along with the accepted media type parameters. +func NegotiateMediaTypeOptions(header string, accepted []runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { + if len(header) == 0 && len(accepted) > 0 { + return MediaTypeOptions{ + Accepted: accepted[0], + }, true + } + + clauses := goautoneg.ParseAccept(header) + for i := range clauses { + clause := &clauses[i] + for i := range accepted { + accepts := &accepted[i] + switch { + case clause.Type == accepts.MediaTypeType && clause.SubType == accepts.MediaTypeSubType, + clause.Type == accepts.MediaTypeType && clause.SubType == "*", + clause.Type == "*" && clause.SubType == "*": + if retVal, ret := acceptMediaTypeOptions(clause.Params, accepts, endpoint); ret { + return retVal, true + } + } + } + } + + return MediaTypeOptions{}, false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go new file mode 100644 index 000000000..096330a4a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -0,0 +1,680 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +const ( + // maximum number of operations a single json patch may contain. + maxJSONPatchOperations = 10000 +) + +// PatchResource returns a function that will handle a resource patch. +func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interface, patchTypes []string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Patch", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // Do this first, otherwise name extraction can fail for unrecognized content types + // TODO: handle this in negotiation + contentType := req.Header.Get("Content-Type") + // Remove "; charset=" if included in header. 
+ if idx := strings.Index(contentType, ";"); idx > 0 { + contentType = contentType[:idx] + } + patchType := types.PatchType(contentType) + + // Ensure the patchType is one we support + if !sets.NewString(patchTypes...).Has(contentType) { + scope.err(negotiation.NewUnsupportedMediaTypeError(patchTypes), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we + // document, move timeout out of this function and declare it in + // api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + patchBytes, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.PatchOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidatePatchOptions(options, patchType); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "PatchOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PatchOptions")) + + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + audit.LogRequestPatch(ae, patchBytes) + trace.Step("Recorded the audit event") + + baseContentType := runtime.ContentTypeJSON + if patchType == types.ApplyPatchType { + baseContentType = runtime.ContentTypeYAML + } + s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), baseContentType) + if !ok { + scope.err(fmt.Errorf("no serializer defined for %v", baseContentType), w, req) + return + } + gv := scope.Kind.GroupVersion() + + codec := runtime.NewCodec( + scope.Serializer.EncoderForVersion(s.Serializer, gv), + scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion), + ) + + userInfo, _ := request.UserFrom(ctx) + staticCreateAttributes := admission.NewAttributesRecord( + nil, + nil, + scope.Kind, + namespace, + name, + scope.Resource, + scope.Subresource, + admission.Create, + patchToCreateOptions(options), + dryrun.IsDryRun(options.DryRun), + userInfo) + staticUpdateAttributes := admission.NewAttributesRecord( + nil, + nil, + scope.Kind, + namespace, + name, + scope.Resource, + scope.Subresource, + admission.Update, + patchToUpdateOptions(options), + dryrun.IsDryRun(options.DryRun), + userInfo, + ) + + mutatingAdmission, _ := admit.(admission.MutationInterface) + createAuthorizerAttributes := authorizer.AttributesRecord{ + User: userInfo, + ResourceRequest: true, + Path: req.URL.Path, + Verb: "create", + APIGroup: scope.Resource.Group, + APIVersion: scope.Resource.Version, + Resource: scope.Resource.Resource, + Subresource: scope.Subresource, + Namespace: namespace, + Name: name, + } + + p := patcher{ + namer: scope.Namer, + creater: scope.Creater, + defaulter: scope.Defaulter, + typer: scope.Typer, + unsafeConvertor: scope.UnsafeConvertor, + kind: scope.Kind, + resource: scope.Resource, + subresource: scope.Subresource, + dryRun: dryrun.IsDryRun(options.DryRun), + + 
objectInterfaces: scope, + + hubGroupVersion: scope.HubGroupVersion, + + createValidation: withAuthorization(rest.AdmissionToValidateObjectFunc(admit, staticCreateAttributes, scope), scope.Authorizer, createAuthorizerAttributes), + updateValidation: rest.AdmissionToValidateObjectUpdateFunc(admit, staticUpdateAttributes, scope), + admissionCheck: mutatingAdmission, + + codec: codec, + + timeout: timeout, + options: options, + + restPatcher: r, + name: name, + patchType: patchType, + patchBytes: patchBytes, + userAgent: req.UserAgent(), + + trace: trace, + } + + result, wasCreated, err := p.patchResource(ctx, scope) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + if err := setObjectSelfLink(ctx, result, req, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + trace.Step("Self-link added") + + status := http.StatusOK + if wasCreated { + status = http.StatusCreated + } + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +type mutateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) error + +// patcher breaks the process of patch application and retries into smaller +// pieces of functionality. +// TODO: Use builder pattern to construct this object? +// TODO: As part of that effort, some aspects of PatchResource above could be +// moved into this type. +type patcher struct { + // Pieces of RequestScope + namer ScopeNamer + creater runtime.ObjectCreater + defaulter runtime.ObjectDefaulter + typer runtime.ObjectTyper + unsafeConvertor runtime.ObjectConvertor + resource schema.GroupVersionResource + kind schema.GroupVersionKind + subresource string + dryRun bool + + objectInterfaces admission.ObjectInterfaces + + hubGroupVersion schema.GroupVersion + + // Validation functions + createValidation rest.ValidateObjectFunc + updateValidation rest.ValidateObjectUpdateFunc + admissionCheck admission.MutationInterface + + codec runtime.Codec + + timeout time.Duration + options *metav1.PatchOptions + + // Operation information + restPatcher rest.Patcher + name string + patchType types.PatchType + patchBytes []byte + userAgent string + + trace *utiltrace.Trace + + // Set at invocation-time (by applyPatch) and immutable thereafter + namespace string + updatedObjectInfo rest.UpdatedObjectInfo + mechanism patchMechanism + forceAllowCreate bool +} + +type patchMechanism interface { + applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) + createNewObject() (runtime.Object, error) +} + +type jsonPatcher struct { + *patcher + + fieldManager *fieldmanager.FieldManager +} + +func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { + // Encode will convert & return a versioned object in JSON. + currentObjJS, err := runtime.Encode(p.codec, currentObject) + if err != nil { + return nil, err + } + + // Apply the patch. + patchedObjJS, err := p.applyJSPatch(currentObjJS) + if err != nil { + return nil, err + } + + // Construct the resulting typed, unversioned object. 
+ objToUpdate := p.restPatcher.New() + if err := runtime.DecodeInto(p.codec, patchedObjJS, objToUpdate); err != nil { + return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), string(patchedObjJS), err.Error()), + }) + } + + if p.fieldManager != nil { + objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)) + } + return objToUpdate, nil +} + +func (p *jsonPatcher) createNewObject() (runtime.Object, error) { + return nil, errors.NewNotFound(p.resource.GroupResource(), p.name) +} + +// applyJSPatch applies the patch. Input and output objects must both have +// the external version, since that is what the patch must have been constructed against. +func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) { + switch p.patchType { + case types.JSONPatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := []interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + + patchObj, err := jsonpatch.DecodePatch(p.patchBytes) + if err != nil { + return nil, errors.NewBadRequest(err.Error()) + } + if len(patchObj) > maxJSONPatchOperations { + return nil, errors.NewRequestEntityTooLargeError( + fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", + maxJSONPatchOperations, len(patchObj))) + } + patchedJS, err := patchObj.Apply(versionedJS) + if err != nil { + return nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + } + return patchedJS, nil + case types.MergePatchType: + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + if len(p.patchBytes) > 1024*1024 { + v := map[string]interface{}{} + if err := json.Unmarshal(p.patchBytes, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + } + + return jsonpatch.MergePatch(versionedJS, p.patchBytes) + default: + // only here as a safety net - go-restful filters content-type + return nil, fmt.Errorf("unknown Content-Type header for patch: %v", p.patchType) + } +} + +type smpPatcher struct { + *patcher + + // Schema + schemaReferenceObj runtime.Object + fieldManager *fieldmanager.FieldManager +} + +func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { + // Since the patch is applied on versioned objects, we need to convert the + // current object to versioned representation first. 
+	currentVersionedObject, err := p.unsafeConvertor.ConvertToVersion(currentObject, p.kind.GroupVersion())
+	if err != nil {
+		return nil, err
+	}
+	versionedObjToUpdate, err := p.creater.New(p.kind)
+	if err != nil {
+		return nil, err
+	}
+	if err := strategicPatchObject(p.defaulter, currentVersionedObject, p.patchBytes, versionedObjToUpdate, p.schemaReferenceObj); err != nil {
+		return nil, err
+	}
+	// Convert the object back to the hub version
+	newObj, err := p.unsafeConvertor.ConvertToVersion(versionedObjToUpdate, p.hubGroupVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	if p.fieldManager != nil {
+		newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent))
+	}
+	return newObj, nil
+}
+
+func (p *smpPatcher) createNewObject() (runtime.Object, error) {
+	return nil, errors.NewNotFound(p.resource.GroupResource(), p.name)
+}
+
+type applyPatcher struct {
+	patch        []byte
+	options      *metav1.PatchOptions
+	creater      runtime.ObjectCreater
+	kind         schema.GroupVersionKind
+	fieldManager *fieldmanager.FieldManager
+	userAgent    string
+}
+
+func (p *applyPatcher) applyPatchToCurrentObject(obj runtime.Object) (runtime.Object, error) {
+	force := false
+	if p.options.Force != nil {
+		force = *p.options.Force
+	}
+	if p.fieldManager == nil {
+		panic("FieldManager must be installed to run apply")
+	}
+
+	patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
+	if err := yaml.Unmarshal(p.patch, &patchObj.Object); err != nil {
+		return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
+	}
+
+	return p.fieldManager.Apply(obj, patchObj, p.options.FieldManager, force)
+}
+
+func (p *applyPatcher) createNewObject() (runtime.Object, error) {
+	obj, err := p.creater.New(p.kind)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new object: %v", err)
+	}
+	return p.applyPatchToCurrentObject(obj)
+}
+
+// strategicPatchObject applies a strategic merge patch of `patchBytes` to
+// `originalObject` and stores the result in `objToUpdate`.
+// It additionally returns the map[string]interface{} representation of the
+// `originalObject` and `patchBytes`.
+// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned.
+func strategicPatchObject(
+	defaulter runtime.ObjectDefaulter,
+	originalObject runtime.Object,
+	patchBytes []byte,
+	objToUpdate runtime.Object,
+	schemaReferenceObj runtime.Object,
+) error {
+	originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
+	if err != nil {
+		return err
+	}
+
+	patchMap := make(map[string]interface{})
+	if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
+		return errors.NewBadRequest(err.Error())
+	}
+
+	if err := applyPatchToObject(defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj); err != nil {
+		return err
+	}
+	return nil
+}
+
+// applyPatch is called every time GuaranteedUpdate asks for the updated object,
+// and is given the currently persisted object as input.
+// TODO: rename this function because the name implies it is related to applyPatcher +func (p *patcher) applyPatch(_ context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) { + // Make sure we actually have a persisted currentObject + p.trace.Step("About to apply patch") + currentObjectHasUID, err := hasUID(currentObject) + if err != nil { + return nil, err + } else if !currentObjectHasUID { + objToUpdate, patchErr = p.mechanism.createNewObject() + } else { + objToUpdate, patchErr = p.mechanism.applyPatchToCurrentObject(currentObject) + } + + if patchErr != nil { + return nil, patchErr + } + + objToUpdateHasUID, err := hasUID(objToUpdate) + if err != nil { + return nil, err + } + if objToUpdateHasUID && !currentObjectHasUID { + accessor, err := meta.Accessor(objToUpdate) + if err != nil { + return nil, err + } + return nil, errors.NewConflict(p.resource.GroupResource(), p.name, fmt.Errorf("uid mismatch: the provided object specified uid %s, and no existing object was found", accessor.GetUID())) + } + + if err := checkName(objToUpdate, p.name, p.namespace, p.namer); err != nil { + return nil, err + } + return objToUpdate, nil +} + +func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime.Object, currentObject runtime.Object, operation admission.Operation, operationOptions runtime.Object) admission.Attributes { + userInfo, _ := request.UserFrom(ctx) + return admission.NewAttributesRecord(updatedObject, currentObject, p.kind, p.namespace, p.name, p.resource, p.subresource, operation, operationOptions, p.dryRun, userInfo) +} + +// applyAdmission is called every time GuaranteedUpdate asks for the updated object, +// and is given the currently persisted object and the patched object as input. 
+// TODO: rename this function because the name implies it is related to applyPatcher +func (p *patcher) applyAdmission(ctx context.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) { + p.trace.Step("About to check admission control") + var operation admission.Operation + var options runtime.Object + if hasUID, err := hasUID(currentObject); err != nil { + return nil, err + } else if !hasUID { + operation = admission.Create + currentObject = nil + options = patchToCreateOptions(p.options) + } else { + operation = admission.Update + options = patchToUpdateOptions(p.options) + } + if p.admissionCheck != nil && p.admissionCheck.Handles(operation) { + attributes := p.admissionAttributes(ctx, patchedObject, currentObject, operation, options) + return patchedObject, p.admissionCheck.Admit(ctx, attributes, p.objectInterfaces) + } + return patchedObject, nil +} + +// patchResource divides PatchResource for easier unit testing +func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runtime.Object, bool, error) { + p.namespace = request.NamespaceValue(ctx) + switch p.patchType { + case types.JSONPatchType, types.MergePatchType: + p.mechanism = &jsonPatcher{ + patcher: p, + fieldManager: scope.FieldManager, + } + case types.StrategicMergePatchType: + schemaReferenceObj, err := p.unsafeConvertor.ConvertToVersion(p.restPatcher.New(), p.kind.GroupVersion()) + if err != nil { + return nil, false, err + } + p.mechanism = &smpPatcher{ + patcher: p, + schemaReferenceObj: schemaReferenceObj, + fieldManager: scope.FieldManager, + } + // this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type + case types.ApplyPatchType: + p.mechanism = &applyPatcher{ + fieldManager: scope.FieldManager, + patch: p.patchBytes, + options: p.options, + creater: p.creater, + kind: p.kind, + userAgent: p.userAgent, + } + p.forceAllowCreate = true + default: + return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType) + } + dedupOwnerReferencesTransformer := func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) { + // Dedup owner references after mutating admission happens + dedupOwnerReferencesAndAddWarning(obj, ctx, true) + return obj, nil + } + + wasCreated := false + p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, p.applyPatch, p.applyAdmission, dedupOwnerReferencesTransformer) + requestFunc := func() (runtime.Object, error) { + // Pass in UpdateOptions to override UpdateStrategy.AllowUpdateOnCreate + options := patchToUpdateOptions(p.options) + updateObject, created, updateErr := p.restPatcher.Update(ctx, p.name, p.updatedObjectInfo, p.createValidation, p.updateValidation, p.forceAllowCreate, options) + wasCreated = created + return updateObject, updateErr + } + result, err := finishRequest(p.timeout, func() (runtime.Object, error) { + result, err := requestFunc() + // If the object wasn't committed to storage because it's serialized size was too large, + // it is safe to remove managedFields (which can be large) and try again. 
+		if isTooLargeError(err) && p.patchType != types.ApplyPatchType {
+			if _, accessorErr := meta.Accessor(p.restPatcher.New()); accessorErr == nil {
+				p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil,
+					p.applyPatch,
+					p.applyAdmission,
+					dedupOwnerReferencesTransformer,
+					func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) {
+						accessor, _ := meta.Accessor(obj)
+						accessor.SetManagedFields(nil)
+						return obj, nil
+					})
+				result, err = requestFunc()
+			}
+		}
+		return result, err
+	})
+	return result, wasCreated, err
+}
+
+// applyPatchToObject applies a strategic merge patch of `patchMap` to
+// `originalMap` and stores the result in `objToUpdate`.
+// NOTE: `objToUpdate` must be a versioned object.
+func applyPatchToObject(
+	defaulter runtime.ObjectDefaulter,
+	originalMap map[string]interface{},
+	patchMap map[string]interface{},
+	objToUpdate runtime.Object,
+	schemaReferenceObj runtime.Object,
+) error {
+	patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj)
+	if err != nil {
+		return interpretStrategicMergePatchError(err)
+	}
+
+	// Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(patchedObjMap, objToUpdate); err != nil {
+		return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
+			field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()),
+		})
+	}
+	// Decoding from JSON to a versioned object would apply defaults, so we do the same here
+	defaulter.Default(objToUpdate)
+
+	return nil
+}
+
+// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code.
+func interpretStrategicMergePatchError(err error) error {
+	switch err {
+	case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat:
+		return errors.NewBadRequest(err.Error())
+	case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys:
+		return errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
+	default:
+		return err
+	}
+}
+
+// patchToUpdateOptions creates an UpdateOptions with the same field values as the provided PatchOptions.
+func patchToUpdateOptions(po *metav1.PatchOptions) *metav1.UpdateOptions {
+	if po == nil {
+		return nil
+	}
+	uo := &metav1.UpdateOptions{
+		DryRun:       po.DryRun,
+		FieldManager: po.FieldManager,
+	}
+	uo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
+	return uo
+}
+
+// patchToCreateOptions creates an CreateOptions with the same field values as the provided PatchOptions.
+func patchToCreateOptions(po *metav1.PatchOptions) *metav1.CreateOptions {
+	if po == nil {
+		return nil
+	}
+	co := &metav1.CreateOptions{
+		DryRun:       po.DryRun,
+		FieldManager: po.FieldManager,
+	}
+	co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
+	return co
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go
new file mode 100644
index 000000000..f9b38e07d
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + utiltrace "k8s.io/utils/trace" +) + +// transformObject takes the object as returned by storage and ensures it is in +// the client's desired form, as well as ensuring any API level fields like self-link +// are properly set. +func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { + if co, ok := obj.(runtime.CacheableObject); ok { + if mediaType.Convert != nil { + // Non-nil mediaType.Convert means that some conversion of the object + // has to happen. Currently conversion may potentially modify the + // object or assume something about it (e.g. asTable operates on + // reflection, which won't work for any wrapper). + // To ensure it will work correctly, let's operate on base objects + // and not cache it for now. + // + // TODO: Long-term, transformObject should be changed so that it + // implements runtime.Encoder interface. + return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req) + } + } + return doTransformObject(ctx, obj, opts, mediaType, scope, req) +} + +func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { + if _, ok := obj.(*metav1.Status); ok { + return obj, nil + } + if err := setObjectSelfLink(ctx, obj, req, scope.Namer); err != nil { + return nil, err + } + + switch target := mediaType.Convert; { + case target == nil: + return obj, nil + + case target.Kind == "PartialObjectMetadata": + return asPartialObjectMetadata(obj, target.GroupVersion()) + + case target.Kind == "PartialObjectMetadataList": + return asPartialObjectMetadataList(obj, target.GroupVersion()) + + case target.Kind == "Table": + options, ok := opts.(*metav1.TableOptions) + if !ok { + return nil, fmt.Errorf("unexpected TableOptions, got %T", opts) + } + return asTable(ctx, obj, options, scope, target.GroupVersion()) + + default: + accepted, _ := negotiation.MediaTypesForSerializer(metainternalversionscheme.Codecs) + err := negotiation.NewNotAcceptableError(accepted) + return nil, err + } +} + +// optionsForTransform will load and validate any additional query parameter options for +// a conversion or return an error. 
+func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Request) (interface{}, error) { + switch target := mediaType.Convert; { + case target == nil: + case target.Kind == "Table" && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): + opts := &metav1.TableOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, opts); err != nil { + return nil, err + } + switch errs := validation.ValidateTableOptions(opts); len(errs) { + case 0: + return opts, nil + case 1: + return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs[0].Error())) + default: + return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs)) + } + } + return nil, nil +} + +// targetEncodingForTransform returns the appropriate serializer for the input media type +func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.MediaTypeOptions, req *http.Request) (schema.GroupVersionKind, runtime.NegotiatedSerializer, bool) { + switch target := mediaType.Convert; { + case target == nil: + case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") && + (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion): + return *target, metainternalversionscheme.Codecs, true + } + return scope.Kind, scope.Serializer, false +} + +// transformResponseObject takes an object loaded from storage and performs any necessary transformations. +// Will write the complete response object. +func transformResponseObject(ctx context.Context, scope *RequestScope, trace *utiltrace.Trace, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) { + options, err := optionsForTransform(mediaType, req) + if err != nil { + scope.err(err, w, req) + return + } + obj, err := transformObject(ctx, result, options, mediaType, scope, req) + if err != nil { + scope.err(err, w, req) + return + } + kind, serializer, _ := targetEncodingForTransform(scope, mediaType, req) + responsewriters.WriteObjectNegotiated(serializer, scope, kind.GroupVersion(), w, req, statusCode, obj) +} + +// errNotAcceptable indicates Accept negotiation has failed +type errNotAcceptable struct { + message string +} + +func newNotAcceptableError(message string) error { + return errNotAcceptable{message} +} + +func (e errNotAcceptable) Error() string { + return e.message +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReason("NotAcceptable"), + Message: e.Error(), + } +} + +func asTable(ctx context.Context, result runtime.Object, opts *metav1.TableOptions, scope *RequestScope, groupVersion schema.GroupVersion) (runtime.Object, error) { + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no Table exists in group version %s", groupVersion)) + } + + obj, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) + if err != nil { + return nil, err + } + + table := (*metav1.Table)(obj) + + for i := range table.Rows { + item := &table.Rows[i] + switch opts.IncludeObject { + case metav1.IncludeObject: + item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, 
scope.Kind.GroupVersion()) + if err != nil { + return nil, err + } + // TODO: rely on defaulting for the value here? + case metav1.IncludeMetadata, "": + m, err := meta.Accessor(item.Object.Object) + if err != nil { + return nil, err + } + // TODO: turn this into an internal type and do conversion in order to get object kind automatically set? + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) + item.Object.Object = partial + case metav1.IncludeNone: + item.Object.Object = nil + default: + err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject)) + return nil, err + } + } + + return table, nil +} + +func asPartialObjectMetadata(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { + if meta.IsListType(result) { + err := newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result)) + return nil, err + } + switch groupVersion { + case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion: + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) + } + m, err := meta.Accessor(result) + if err != nil { + return nil, err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata")) + return partial, nil +} + +func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) { + li, ok := result.(metav1.ListInterface) + if !ok { + return nil, newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result)) + } + + gvk := groupVersion.WithKind("PartialObjectMetadata") + switch { + case groupVersion == metav1beta1.SchemeGroupVersion: + list := &metav1beta1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, *partial) + return nil + }) + if err != nil { + return nil, err + } + list.SelfLink = li.GetSelfLink() + list.ResourceVersion = li.GetResourceVersion() + list.Continue = li.GetContinue() + return list, nil + + case groupVersion == metav1.SchemeGroupVersion: + list := &metav1.PartialObjectMetadataList{} + err := meta.EachListItem(result, func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + return err + } + partial := meta.AsPartialObjectMetadata(m) + partial.GetObjectKind().SetGroupVersionKind(gvk) + list.Items = append(list.Items, *partial) + return nil + }) + if err != nil { + return nil, err + } + list.SelfLink = li.GetSelfLink() + list.ResourceVersion = li.GetResourceVersion() + list.Continue = li.GetContinue() + return list, nil + + default: + return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion)) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go new file mode 100644 index 000000000..b76758b79 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package responsewriters containers helpers to write responses in HTTP handlers. +package responsewriters // import "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go new file mode 100644 index 000000000..d13bee4d2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go @@ -0,0 +1,78 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package responsewriters + +import ( + "context" + "fmt" + "net/http" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// Avoid emitting errors that look like valid HTML. Quotes are okay. 
+var sanitizer = strings.NewReplacer(`&`, "&amp;", `<`, "&lt;", `>`, "&gt;")
+
+// Forbidden renders a simple forbidden error
+func Forbidden(ctx context.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) {
+	msg := sanitizer.Replace(forbiddenMessage(attributes))
+	w.Header().Set("X-Content-Type-Options", "nosniff")
+
+	var errMsg string
+	if len(reason) == 0 {
+		errMsg = fmt.Sprintf("%s", msg)
+	} else {
+		errMsg = fmt.Sprintf("%s: %s", msg, reason)
+	}
+	gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()}
+	gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()}
+	ErrorNegotiated(apierrors.NewForbidden(gr, attributes.GetName(), fmt.Errorf(errMsg)), s, gv, w, req)
+}
+
+func forbiddenMessage(attributes authorizer.Attributes) string {
+	username := ""
+	if user := attributes.GetUser(); user != nil {
+		username = user.GetName()
+	}
+
+	if !attributes.IsResourceRequest() {
+		return fmt.Sprintf("User %q cannot %s path %q", username, attributes.GetVerb(), attributes.GetPath())
+	}
+
+	resource := attributes.GetResource()
+	if subresource := attributes.GetSubresource(); len(subresource) > 0 {
+		resource = resource + "/" + subresource
+	}
+
+	if ns := attributes.GetNamespace(); len(ns) > 0 {
+		return fmt.Sprintf("User %q cannot %s resource %q in API group %q in the namespace %q", username, attributes.GetVerb(), resource, attributes.GetAPIGroup(), ns)
+	}
+
+	return fmt.Sprintf("User %q cannot %s resource %q in API group %q at the cluster scope", username, attributes.GetVerb(), resource, attributes.GetAPIGroup())
+}
+
+// InternalError renders a simple internal error
+func InternalError(w http.ResponseWriter, req *http.Request, err error) {
+	http.Error(w, sanitizer.Replace(fmt.Sprintf("Internal Server Error: %q: %v", req.RequestURI, err)),
+		http.StatusInternalServerError)
+	utilruntime.HandleError(err)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go
new file mode 100644
index 000000000..5a8454350
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package responsewriters
+
+import (
+	"fmt"
+	"net/http"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/storage"
+)
+
+// statusError is an object that can be converted into an metav1.Status
+type statusError interface {
+	Status() metav1.Status
+}
+
+// ErrorToAPIStatus converts an error to an metav1.Status object.
+func ErrorToAPIStatus(err error) *metav1.Status { + switch t := err.(type) { + case statusError: + status := t.Status() + if len(status.Status) == 0 { + status.Status = metav1.StatusFailure + } + switch status.Status { + case metav1.StatusSuccess: + if status.Code == 0 { + status.Code = http.StatusOK + } + case metav1.StatusFailure: + if status.Code == 0 { + status.Code = http.StatusInternalServerError + } + default: + runtime.HandleError(fmt.Errorf("apiserver received an error with wrong status field : %#+v", err)) + if status.Code == 0 { + status.Code = http.StatusInternalServerError + } + } + status.Kind = "Status" + status.APIVersion = "v1" + //TODO: check for invalid responses + return &status + default: + status := http.StatusInternalServerError + switch { + //TODO: replace me with NewConflictErr + case storage.IsConflict(err): + status = http.StatusConflict + } + // Log errors that were not converted to an error status + // by REST storage - these typically indicate programmer + // error by not using pkg/api/errors, or unexpected failure + // cases. + runtime.HandleError(fmt.Errorf("apiserver received an error that is not an metav1.Status: %#+v", err)) + return &metav1.Status{ + TypeMeta: metav1.TypeMeta{ + Kind: "Status", + APIVersion: "v1", + }, + Status: metav1.StatusFailure, + Code: int32(status), + Reason: metav1.StatusReasonUnknown, + Message: err.Error(), + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go new file mode 100644 index 000000000..65cb389e5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go @@ -0,0 +1,286 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package responsewriters + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "sync" + + "k8s.io/apiserver/pkg/features" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/apiserver/pkg/util/flushwriter" + "k8s.io/apiserver/pkg/util/wsstream" +) + +// StreamObject performs input stream negotiation from a ResourceStreamer and writes that to the response. +// If the client requests a websocket upgrade, negotiate for a websocket reader protocol (because many +// browser clients cannot easily handle binary streaming protocols). 
+func StreamObject(statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSerializer, stream rest.ResourceStreamer, w http.ResponseWriter, req *http.Request) { + out, flush, contentType, err := stream.InputStream(req.Context(), gv.String(), req.Header.Get("Accept")) + if err != nil { + ErrorNegotiated(err, s, gv, w, req) + return + } + if out == nil { + // No output provided - return StatusNoContent + w.WriteHeader(http.StatusNoContent) + return + } + defer out.Close() + + if wsstream.IsWebSocketRequest(req) { + r := wsstream.NewReader(out, true, wsstream.NewDefaultReaderProtocols()) + if err := r.Copy(w, req); err != nil { + utilruntime.HandleError(fmt.Errorf("error encountered while streaming results via websocket: %v", err)) + } + return + } + + if len(contentType) == 0 { + contentType = "application/octet-stream" + } + w.Header().Set("Content-Type", contentType) + w.WriteHeader(statusCode) + // Flush headers, if possible + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + writer := w.(io.Writer) + if flush { + writer = flushwriter.Wrap(w) + } + io.Copy(writer, out) +} + +// SerializeObject renders an object in the content type negotiated by the client using the provided encoder. +// The context is optional and can be nil. This method will perform optional content compression if requested by +// a client and the feature gate for APIResponseCompression is enabled. +func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) { + w := &deferredResponseWriter{ + mediaType: mediaType, + statusCode: statusCode, + contentEncoding: negotiateContentEncoding(req), + hw: hw, + } + + err := encoder.Encode(object, w) + if err == nil { + err = w.Close() + if err != nil { + // we cannot write an error to the writer anymore as the Encode call was successful. + utilruntime.HandleError(fmt.Errorf("apiserver was unable to close cleanly the response writer: %v", err)) + } + return + } + + // make a best effort to write the object if a failure is detected + utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a JSON response: %v", err)) + status := ErrorToAPIStatus(err) + candidateStatusCode := int(status.Code) + // if the current status code is successful, allow the error's status code to overwrite it + if statusCode >= http.StatusOK && statusCode < http.StatusBadRequest { + w.statusCode = candidateStatusCode + } + output, err := runtime.Encode(encoder, status) + if err != nil { + w.mediaType = "text/plain" + output = []byte(fmt.Sprintf("%s: %s", status.Reason, status.Message)) + } + if _, err := w.Write(output); err != nil { + utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a fallback JSON response: %v", err)) + } + w.Close() +} + +var gzipPool = &sync.Pool{ + New: func() interface{} { + gw, err := gzip.NewWriterLevel(nil, defaultGzipContentEncodingLevel) + if err != nil { + panic(err) + } + return gw + }, +} + +const ( + // defaultGzipContentEncodingLevel is set to 4 which uses less CPU than the default level + defaultGzipContentEncodingLevel = 4 + // defaultGzipThresholdBytes is compared to the size of the first write from the stream + // (usually the entire object), and if the size is smaller no gzipping will be performed + // if the client requests it. + defaultGzipThresholdBytes = 128 * 1024 +) + +// negotiateContentEncoding returns a supported client-requested content encoding for the +// provided request. 
It will return the empty string if no supported content encoding was +// found or if response compression is disabled. +func negotiateContentEncoding(req *http.Request) string { + encoding := req.Header.Get("Accept-Encoding") + if len(encoding) == 0 { + return "" + } + if !utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression) { + return "" + } + for len(encoding) > 0 { + var token string + if next := strings.Index(encoding, ","); next != -1 { + token = encoding[:next] + encoding = encoding[next+1:] + } else { + token = encoding + encoding = "" + } + switch strings.TrimSpace(token) { + case "gzip": + return "gzip" + } + } + return "" +} + +type deferredResponseWriter struct { + mediaType string + statusCode int + contentEncoding string + + hasWritten bool + hw http.ResponseWriter + w io.Writer +} + +func (w *deferredResponseWriter) Write(p []byte) (n int, err error) { + if w.hasWritten { + return w.w.Write(p) + } + w.hasWritten = true + + hw := w.hw + header := hw.Header() + switch { + case w.contentEncoding == "gzip" && len(p) > defaultGzipThresholdBytes: + header.Set("Content-Encoding", "gzip") + header.Add("Vary", "Accept-Encoding") + + gw := gzipPool.Get().(*gzip.Writer) + gw.Reset(hw) + + w.w = gw + default: + w.w = hw + } + + header.Set("Content-Type", w.mediaType) + hw.WriteHeader(w.statusCode) + return w.w.Write(p) +} + +func (w *deferredResponseWriter) Close() error { + if !w.hasWritten { + return nil + } + var err error + switch t := w.w.(type) { + case *gzip.Writer: + err = t.Close() + t.Reset(nil) + gzipPool.Put(t) + } + return err +} + +var nopCloser = ioutil.NopCloser(nil) + +// WriteObjectNegotiated renders an object in the content type negotiated by the client. +func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiation.EndpointRestrictions, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) { + stream, ok := object.(rest.ResourceStreamer) + if ok { + requestInfo, _ := request.RequestInfoFrom(req.Context()) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + StreamObject(statusCode, gv, s, stream, w, req) + }) + return + } + + _, serializer, err := negotiation.NegotiateOutputMediaType(req, s, restrictions) + if err != nil { + // if original statusCode was not successful we need to return the original error + // we cannot hide it behind negotiation problems + if statusCode < http.StatusOK || statusCode >= http.StatusBadRequest { + WriteRawJSON(int(statusCode), object, w) + return + } + status := ErrorToAPIStatus(err) + WriteRawJSON(int(status.Code), status, w) + return + } + + if ae := request.AuditEventFrom(req.Context()); ae != nil { + audit.LogResponseObject(ae, object, gv, s) + } + + encoder := s.EncoderForVersion(serializer.Serializer, gv) + SerializeObject(serializer.MediaType, encoder, w, req, statusCode, object) +} + +// ErrorNegotiated renders an error to the response. Returns the HTTP status code of the error. +// The context is optional and may be nil. 
+func ErrorNegotiated(err error, s runtime.NegotiatedSerializer, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request) int { + status := ErrorToAPIStatus(err) + code := int(status.Code) + // when writing an error, check to see if the status indicates a retry after period + if status.Details != nil && status.Details.RetryAfterSeconds > 0 { + delay := strconv.Itoa(int(status.Details.RetryAfterSeconds)) + w.Header().Set("Retry-After", delay) + } + + if code == http.StatusNoContent { + w.WriteHeader(code) + return code + } + + WriteObjectNegotiated(s, negotiation.DefaultEndpointRestrictions, gv, w, req, code, status) + return code +} + +// WriteRawJSON writes a non-API object in JSON. +func WriteRawJSON(statusCode int, object interface{}, w http.ResponseWriter) { + output, err := json.MarshalIndent(object, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + w.Write(output) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go new file mode 100644 index 000000000..01396f271 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -0,0 +1,529 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + goruntime "runtime" + "strings" + "time" + + grpccodes "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/apiserver/pkg/warning" + "k8s.io/klog/v2" +) + +const ( + // DuplicateOwnerReferencesWarningFormat is the warning that a client receives when a create/update request contains + // duplicate owner reference entries. + DuplicateOwnerReferencesWarningFormat = ".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" + // DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed + // after mutating admission. 
+ // NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission. + // For PATCH request the API server only dedups after mutating admission. + DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = ".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" +) + +// RequestScope encapsulates common fields across all RESTful handler methods. +type RequestScope struct { + Namer ScopeNamer + + Serializer runtime.NegotiatedSerializer + runtime.ParameterCodec + + // StandardSerializers, if set, restricts which serializers can be used when + // we aren't transforming the output (into Table or PartialObjectMetadata). + // Used only by CRDs which do not yet support Protobuf. + StandardSerializers []runtime.SerializerInfo + + Creater runtime.ObjectCreater + Convertor runtime.ObjectConvertor + Defaulter runtime.ObjectDefaulter + Typer runtime.ObjectTyper + UnsafeConvertor runtime.ObjectConvertor + Authorizer authorizer.Authorizer + + EquivalentResourceMapper runtime.EquivalentResourceMapper + + TableConvertor rest.TableConvertor + FieldManager *fieldmanager.FieldManager + + Resource schema.GroupVersionResource + Kind schema.GroupVersionKind + Subresource string + + MetaGroupVersion schema.GroupVersion + + // HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling. + HubGroupVersion schema.GroupVersion + + MaxRequestBodyBytes int64 +} + +func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) { + responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req) +} + +func (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool { + // some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if + // gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types. 
+ if gvk == nil { + if len(scope.StandardSerializers) == 0 { + return true + } + for _, info := range scope.StandardSerializers { + if info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType { + return true + } + } + return false + } + + // TODO: this is temporary, replace with an abstraction calculated at endpoint installation time + if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion { + switch gvk.Kind { + case "Table": + return scope.TableConvertor != nil && + mimeType == "application" && + (mimeSubType == "json" || mimeSubType == "yaml") + case "PartialObjectMetadata", "PartialObjectMetadataList": + // TODO: should delineate between lists and non-list endpoints + return true + default: + return false + } + } + return false +} + +func (scope *RequestScope) AllowsServerVersion(version string) bool { + return version == scope.MetaGroupVersion.Version +} + +func (scope *RequestScope) AllowsStreamSchema(s string) bool { + return s == "watch" +} + +var _ admission.ObjectInterfaces = &RequestScope{} + +func (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater } +func (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer } +func (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter } +func (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor } +func (r *RequestScope) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper { + return r.EquivalentResourceMapper +} + +// ConnectResource returns a function that handles a connect request on a rest.Storage object. +func ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if isDryRun(req.URL) { + scope.err(errors.NewBadRequest("dryRun is not supported"), w, req) + return + } + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := req.Context() + ctx = request.WithNamespace(ctx, namespace) + ae := request.AuditEventFrom(ctx) + admit = admission.WithAudit(admit, ae) + + opts, subpath, subpathKey := connecter.NewConnectOptions() + if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if admit != nil && admit.Handles(admission.Connect) { + userInfo, _ := request.UserFrom(ctx) + // TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { + err = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) + if err != nil { + scope.err(err, w, req) + return + } + } + if validatingAdmission, ok := admit.(admission.ValidationInterface); ok { + err = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope) + if err != nil { + scope.err(err, w, req) + return + } + } + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { + handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w}) 
+ if err != nil { + scope.err(err, w, req) + return + } + handler.ServeHTTP(w, req) + }) + } +} + +// responder implements rest.Responder for assisting a connector in writing objects or errors. +type responder struct { + scope *RequestScope + req *http.Request + w http.ResponseWriter +} + +func (r *responder) Object(statusCode int, obj runtime.Object) { + responsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj) +} + +func (r *responder) Error(err error) { + r.scope.err(err, r.w, r.req) +} + +// resultFunc is a function that returns a rest result and can be run in a goroutine +type resultFunc func() (runtime.Object, error) + +// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response. +// An api.Status object with status != success is considered an "error", which interrupts the normal response flow. +func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) { + // these channels need to be buffered to prevent the goroutine below from hanging indefinitely + // when the select statement reads something other than the one the goroutine sends on. + ch := make(chan runtime.Object, 1) + errCh := make(chan error, 1) + panicCh := make(chan interface{}, 1) + go func() { + // panics don't cross goroutine boundaries, so we have to handle ourselves + defer func() { + panicReason := recover() + if panicReason != nil { + // do not wrap the sentinel ErrAbortHandler panic value + if panicReason != http.ErrAbortHandler { + // Same as stdlib http server code. Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:goruntime.Stack(buf, false)] + panicReason = fmt.Sprintf("%v\n%s", panicReason, buf) + } + // Propagate to parent goroutine + panicCh <- panicReason + } + }() + + if result, err := fn(); err != nil { + errCh <- err + } else { + ch <- result + } + }() + + select { + case result = <-ch: + if status, ok := result.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, errors.FromObject(status) + } + } + return result, nil + case err = <-errCh: + return nil, err + case p := <-panicCh: + panic(p) + case <-time.After(timeout): + return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout %s", timeout), 0) + } +} + +// transformDecodeError adds additional information into a bad-request api error when a decode fails. +func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error { + objGVKs, _, err := typer.ObjectKinds(into) + if err != nil { + return errors.NewBadRequest(err.Error()) + } + objGVK := objGVKs[0] + if gvk != nil && len(gvk.Kind) > 0 { + return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, objGVK.Kind, baseErr)) + } + summary := summarizeData(body, 30) + return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", objGVK.Kind, baseErr, summary)) +} + +// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request +// plus the path and query generated by the provided linkFunc +func setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error { + // TODO: SelfLink generation should return a full URL? 
+ uri, err := namer.GenerateLink(requestInfo, obj) + if err != nil { + return nil + } + + return namer.SetSelfLink(obj, uri) +} + +func hasUID(obj runtime.Object) (bool, error) { + if obj == nil { + return false, nil + } + accessor, err := meta.Accessor(obj) + if err != nil { + return false, errors.NewInternalError(err) + } + if len(accessor.GetUID()) == 0 { + return false, nil + } + return true, nil +} + +// checkName checks the provided name against the request +func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error { + objNamespace, objName, err := namer.ObjectName(obj) + if err != nil { + return errors.NewBadRequest(fmt.Sprintf( + "the name of the object (%s based on URL) was undeterminable: %v", name, err)) + } + if objName != name { + return errors.NewBadRequest(fmt.Sprintf( + "the name of the object (%s) does not match the name on the URL (%s)", objName, name)) + } + if len(namespace) > 0 { + if len(objNamespace) > 0 && objNamespace != namespace { + return errors.NewBadRequest(fmt.Sprintf( + "the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace)) + } + } + + return nil +} + +// dedupOwnerReferences dedups owner references over the entire entry. +// NOTE: We don't know enough about the existing cases of owner references +// sharing the same UID but different fields. Nor do we know what might break. +// In the future we may just dedup/reject owner references with the same UID. +func dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) { + var result []metav1.OwnerReference + var duplicates []string + seen := make(map[types.UID]struct{}) + for _, ref := range refs { + _, ok := seen[ref.UID] + // Short-circuit if we haven't seen the UID before. Otherwise + // check the entire list we have so far. + if !ok || !hasOwnerReference(result, ref) { + seen[ref.UID] = struct{}{} + result = append(result, ref) + } else { + duplicates = append(duplicates, string(ref.UID)) + } + } + return result, duplicates +} + +// hasOwnerReference returns true if refs has an item equal to ref. The function +// focuses on semantic equality instead of memory equality, to catch duplicates +// with different pointer addresses. The function uses apiequality.Semantic +// instead of implementing its own comparison, to tolerate API changes to +// metav1.OwnerReference. +// NOTE: This is expensive, but we accept it because we've made sure it only +// happens to owner references containing duplicate UIDs, plus typically the +// number of items in the list should be small. +func hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool { + for _, r := range refs { + if apiequality.Semantic.DeepEqual(r, ref) { + return true + } + } + return false +} + +// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata. +// If duplicates are found, the function records a warning to the provided context. +func dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) { + accessor, err := meta.Accessor(obj) + if err != nil { + // The object doesn't have metadata. Nothing we need to do here. + return + } + refs := accessor.GetOwnerReferences() + deduped, duplicates := dedupOwnerReferences(refs) + if len(duplicates) > 0 { + // NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission. + // For PATCH request the API server only dedups after mutating admission. 
+ format := DuplicateOwnerReferencesWarningFormat + if afterMutatingAdmission { + format = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat + } + warning.AddWarning(requestContext, "", fmt.Sprintf(format, + strings.Join(duplicates, ", "))) + accessor.SetOwnerReferences(deduped) + } +} + +// setObjectSelfLink sets the self link of an object as needed. +// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List +// interfaces +func setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error { + if utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) { + // Ensure that for empty lists we don't return items. + if meta.IsListType(obj) && meta.LenList(obj) == 0 { + if err := meta.SetList(obj, []runtime.Object{}); err != nil { + return err + } + } + return nil + } + + // We only generate list links on objects that implement ListInterface - historically we duck typed this + // check via reflection, but as we move away from reflection we require that you not only carry Items but + // ListMeta into order to be identified as a list. + if !meta.IsListType(obj) { + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + return fmt.Errorf("missing requestInfo") + } + return setSelfLink(obj, requestInfo, namer) + } + + uri, err := namer.GenerateListLink(req) + if err != nil { + return err + } + if err := namer.SetSelfLink(obj, uri); err != nil { + klog.V(4).Infof("Unable to set self link on object: %v", err) + } + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + return fmt.Errorf("missing requestInfo") + } + + count := 0 + err = meta.EachListItem(obj, func(obj runtime.Object) error { + count++ + return setSelfLink(obj, requestInfo, namer) + }) + + if count == 0 { + if err := meta.SetList(obj, []runtime.Object{}); err != nil { + return err + } + } + + return err +} + +func summarizeData(data []byte, maxLength int) string { + switch { + case len(data) == 0: + return "" + case data[0] == '{': + if len(data) > maxLength { + return string(data[:maxLength]) + " ..." + } + return string(data) + default: + if len(data) > maxLength { + return hex.EncodeToString(data[:maxLength]) + " ..." + } + return hex.EncodeToString(data) + } +} + +func limitedReadBody(req *http.Request, limit int64) ([]byte, error) { + defer req.Body.Close() + if limit <= 0 { + return ioutil.ReadAll(req.Body) + } + lr := &io.LimitedReader{ + R: req.Body, + N: limit + 1, + } + data, err := ioutil.ReadAll(lr) + if err != nil { + return nil, err + } + if lr.N <= 0 { + return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", limit)) + } + return data, nil +} + +func parseTimeout(str string) time.Duration { + if str != "" { + timeout, err := time.ParseDuration(str) + if err == nil { + return timeout + } + klog.Errorf("Failed to parse %q: %v", str, err) + } + // 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout. Everyone chooses 30. 
+ return 34 * time.Second +} + +func isDryRun(url *url.URL) bool { + return len(url.Query()["dryRun"]) != 0 +} + +type etcdError interface { + Code() grpccodes.Code + Error() string +} + +type grpcError interface { + GRPCStatus() *grpcstatus.Status +} + +func isTooLargeError(err error) bool { + if err != nil { + if etcdErr, ok := err.(etcdError); ok { + if etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == "etcdserver: request is too large" { + return true + } + } + if grpcErr, ok := err.(grpcError); ok { + if grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), "trying to send message larger than max") { + return true + } + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go new file mode 100644 index 000000000..f0473323f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -0,0 +1,269 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/util/dryrun" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utiltrace "k8s.io/utils/trace" +) + +// UpdateResource returns a function that will handle a resource update +func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. 
+ trace := utiltrace.New("Update", utiltrace.Field{Key: "url", Value: req.URL.Path}, utiltrace.Field{Key: "user-agent", Value: &lazyTruncatedUserAgent{req}}, utiltrace.Field{Key: "client", Value: &lazyClientIP{req}}) + defer trace.LogIfLong(500 * time.Millisecond) + + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { + scope.err(errors.NewBadRequest("the dryRun feature is disabled"), w, req) + return + } + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + ctx = request.WithNamespace(ctx, namespace) + + outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) + if err != nil { + scope.err(err, w, req) + return + } + + options := &metav1.UpdateOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 { + err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs) + scope.err(err, w, req) + return + } + options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions")) + + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + defaultGVK := scope.Kind + original := r.New() + + trace.Step("About to convert to expected version") + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(scope.Typer, err, original, gvk, body) + scope.err(err, w, req) + return + } + if gvk.GroupVersion() != defaultGVK.GroupVersion() { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + admit = admission.WithAudit(admit, ae) + + if err := checkName(obj, name, namespace, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + + userInfo, _ := request.UserFrom(ctx) + transformers := []rest.TransformFunc{} + + // allows skipping managedFields update if the resulting object is too big + shouldUpdateManagedFields := true + if scope.FieldManager != nil { + transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) { + if shouldUpdateManagedFields { + return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil + } + return newObj, nil + }) + } + + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { + transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { + isNotZeroObject, err := 
hasUID(oldObj) + if err != nil { + return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error()) + } else if !isNotZeroObject { + if mutatingAdmission.Handles(admission.Create) { + return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope) + } + } else { + if mutatingAdmission.Handles(admission.Update) { + return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope) + } + } + return newObj, nil + }) + transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { + // Dedup owner references again after mutating admission happens + dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true) + return newObj, nil + }) + } + + createAuthorizerAttributes := authorizer.AttributesRecord{ + User: userInfo, + ResourceRequest: true, + Path: req.URL.Path, + Verb: "create", + APIGroup: scope.Resource.Group, + APIVersion: scope.Resource.Version, + Resource: scope.Resource.Resource, + Subresource: scope.Subresource, + Namespace: namespace, + Name: name, + } + + trace.Step("About to store object in database") + wasCreated := false + requestFunc := func() (runtime.Object, error) { + obj, created, err := r.Update( + ctx, + name, + rest.DefaultUpdatedObjectInfo(obj, transformers...), + withAuthorization(rest.AdmissionToValidateObjectFunc( + admit, + admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope), + scope.Authorizer, createAuthorizerAttributes), + rest.AdmissionToValidateObjectUpdateFunc( + admit, + admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope), + false, + options, + ) + wasCreated = created + return obj, err + } + // Dedup owner references before updating managed fields + dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) + result, err := finishRequest(timeout, func() (runtime.Object, error) { + result, err := requestFunc() + // If the object wasn't committed to storage because it's serialized size was too large, + // it is safe to remove managedFields (which can be large) and try again. 
+ if isTooLargeError(err) && scope.FieldManager != nil { + if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil { + accessor.SetManagedFields(nil) + shouldUpdateManagedFields = false + result, err = requestFunc() + } + } + return result, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + status := http.StatusOK + if wasCreated { + status = http.StatusCreated + } + + transformResponseObject(ctx, scope, trace, req, w, status, outputMediaType, result) + } +} + +func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer, attributes authorizer.Attributes) rest.ValidateObjectFunc { + var once sync.Once + var authorizerDecision authorizer.Decision + var authorizerReason string + var authorizerErr error + return func(ctx context.Context, obj runtime.Object) error { + if a == nil { + return errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize a create on update")) + } + once.Do(func() { + authorizerDecision, authorizerReason, authorizerErr = a.Authorize(ctx, attributes) + }) + // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. + if authorizerDecision == authorizer.DecisionAllow { + // Continue to validating admission + return validate(ctx, obj) + } + if authorizerErr != nil { + return errors.NewInternalError(authorizerErr) + } + + // The user is not authorized to perform this action, so we need to build the error response + gr := schema.GroupResource{ + Group: attributes.GetAPIGroup(), + Resource: attributes.GetResource(), + } + name := attributes.GetName() + err := fmt.Errorf("%v", authorizerReason) + return errors.NewForbidden(gr, name, err) + } +} + +// updateToCreateOptions creates a CreateOptions with the same field values as the provided UpdateOptions. +func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions { + if uo == nil { + return nil + } + co := &metav1.CreateOptions{ + DryRun: uo.DryRun, + FieldManager: uo.FieldManager, + } + co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + return co +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go new file mode 100644 index 000000000..22945ccf6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -0,0 +1,337 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/apiserver/pkg/util/wsstream" + + "golang.org/x/net/websocket" +) + +// nothing will ever be sent down this channel +var neverExitWatch <-chan time.Time = make(chan time.Time) + +// timeoutFactory abstracts watch timeout logic for testing +type TimeoutFactory interface { + TimeoutCh() (<-chan time.Time, func() bool) +} + +// realTimeoutFactory implements timeoutFactory +type realTimeoutFactory struct { + timeout time.Duration +} + +// TimeoutCh returns a channel which will receive something when the watch times out, +// and a cleanup function to call when this happens. +func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) { + if w.timeout == 0 { + return neverExitWatch, func() bool { return false } + } + t := time.NewTimer(w.timeout) + return t.C, t.Stop +} + +// serveWatch will serve a watch response. +// TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled. +func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) { + defer watcher.Stop() + + options, err := optionsForTransform(mediaTypeOptions, req) + if err != nil { + scope.err(err, w, req) + return + } + + // negotiate for the stream serializer from the scope's serializer + serializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope) + if err != nil { + scope.err(err, w, req) + return + } + framer := serializer.StreamSerializer.Framer + streamSerializer := serializer.StreamSerializer.Serializer + encoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion()) + useTextFraming := serializer.EncodesAsText + if framer == nil { + scope.err(fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType), w, req) + return + } + // TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here + mediaType := serializer.MediaType + if mediaType != runtime.ContentTypeJSON { + mediaType += ";stream=watch" + } + + // locate the appropriate embedded encoder based on the transform + var embeddedEncoder runtime.Encoder + contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req) + if transform { + info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType) + if !ok { + scope.err(fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer), w, req) + return + } + embeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion()) + } else { + embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion()) + } + + ctx := req.Context() + + server := &WatchServer{ + Watching: watcher, + Scope: scope, + + UseTextFraming: useTextFraming, + MediaType: mediaType, + Framer: framer, + Encoder: encoder, + EmbeddedEncoder: embeddedEncoder, + + Fixup: func(obj runtime.Object) 
runtime.Object { + result, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) + return obj + } + // When we are transformed to a table, use the table options as the state for whether we + // should print headers - on watch, we only want to print table headers on the first object + // and omit them on subsequent events. + if tableOptions, ok := options.(*metav1.TableOptions); ok { + tableOptions.NoHeaders = true + } + return result + }, + + TimeoutFactory: &realTimeoutFactory{timeout}, + } + + server.ServeHTTP(w, req) +} + +// WatchServer serves a watch.Interface over a websocket or vanilla HTTP. +type WatchServer struct { + Watching watch.Interface + Scope *RequestScope + + // true if websocket messages should use text framing (as opposed to binary framing) + UseTextFraming bool + // the media type this watch is being served with + MediaType string + // used to frame the watch stream + Framer runtime.Framer + // used to encode the watch stream event itself + Encoder runtime.Encoder + // used to encode the nested object in the watch stream + EmbeddedEncoder runtime.Encoder + // used to correct the object before we send it to the serializer + Fixup func(runtime.Object) runtime.Object + + TimeoutFactory TimeoutFactory +} + +// ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked +// or over a websocket connection. +func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + kind := s.Scope.Kind + metrics.RegisteredWatchers.WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + defer metrics.RegisteredWatchers.WithLabelValues(kind.Group, kind.Version, kind.Kind).Dec() + + w = httplog.Unlogged(req, w) + + if wsstream.IsWebSocketRequest(req) { + w.Header().Set("Content-Type", s.MediaType) + websocket.Handler(s.HandleWS).ServeHTTP(w, req) + return + } + + flusher, ok := w.(http.Flusher) + if !ok { + err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w) + utilruntime.HandleError(err) + s.Scope.err(errors.NewInternalError(err), w, req) + return + } + + framer := s.Framer.NewFrameWriter(w) + if framer == nil { + // programmer error + err := fmt.Errorf("no stream framing support is available for media type %q", s.MediaType) + utilruntime.HandleError(err) + s.Scope.err(errors.NewBadRequest(err.Error()), w, req) + return + } + e := streaming.NewEncoder(framer, s.Encoder) + + // ensure the connection times out + timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh() + defer cleanup() + + // begin the stream + w.Header().Set("Content-Type", s.MediaType) + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + var unknown runtime.Unknown + internalEvent := &metav1.InternalEvent{} + outEvent := &metav1.WatchEvent{} + buf := &bytes.Buffer{} + ch := s.Watching.ResultChan() + done := req.Context().Done() + + for { + select { + case <-done: + return + case <-timeoutCh: + return + case event, ok := <-ch: + if !ok { + // End of results. 
+ return + } + metrics.WatchEvents.WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + + obj := s.Fixup(event.Object) + if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { + // unexpected error + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) + return + } + + // ContentType is not required here because we are defaulting to the serializer + // type + unknown.Raw = buf.Bytes() + event.Object = &unknown + metrics.WatchEventsSizes.WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw))) + + *outEvent = metav1.WatchEvent{} + + // create the external type directly and encode it. Clients will only recognize the serialization we provide. + // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way + // and we get the benefit of using conversion functions which already have to stay in sync + *internalEvent = metav1.InternalEvent(event) + err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) + // client disconnect. + return + } + if err := e.Encode(outEvent); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)) + // client disconnect. + return + } + if len(ch) == 0 { + flusher.Flush() + } + + buf.Reset() + } + } +} + +// HandleWS implements a websocket handler. +func (s *WatchServer) HandleWS(ws *websocket.Conn) { + defer ws.Close() + done := make(chan struct{}) + + go func() { + defer utilruntime.HandleCrash() + // This blocks until the connection is closed. + // Client should not send anything. + wsstream.IgnoreReceives(ws, 0) + // Once the client closes, we should also close + close(done) + }() + + var unknown runtime.Unknown + internalEvent := &metav1.InternalEvent{} + buf := &bytes.Buffer{} + streamBuf := &bytes.Buffer{} + ch := s.Watching.ResultChan() + + for { + select { + case <-done: + return + case event, ok := <-ch: + if !ok { + // End of results. + return + } + obj := s.Fixup(event.Object) + if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { + // unexpected error + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) + return + } + + // ContentType is not required here because we are defaulting to the serializer + // type + unknown.Raw = buf.Bytes() + event.Object = &unknown + + // the internal event will be versioned by the encoder + // create the external type directly and encode it. Clients will only recognize the serialization we provide. + // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way + // and we get the benefit of using conversion functions which already have to stay in sync + outEvent := &metav1.WatchEvent{} + *internalEvent = metav1.InternalEvent(event) + err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) + // client disconnect. + return + } + if err := s.Encoder.Encode(outEvent, streamBuf); err != nil { + // encoding error + utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err)) + return + } + if s.UseTextFraming { + if err := websocket.Message.Send(ws, streamBuf.String()); err != nil { + // Client disconnect. 
+ return + } + } else { + if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil { + // Client disconnect. + return + } + } + buf.Reset() + streamBuf.Reset() + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go new file mode 100644 index 000000000..6549771ce --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -0,0 +1,1246 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "fmt" + "net/http" + gpath "path" + "reflect" + "sort" + "strings" + "time" + "unicode" + + restful "github.com/emicklei/go-restful" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/endpoints/deprecation" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" + utilwarning "k8s.io/apiserver/pkg/endpoints/warning" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storageversion" + utilfeature "k8s.io/apiserver/pkg/util/feature" + versioninfo "k8s.io/component-base/version" +) + +const ( + ROUTE_META_GVK = "x-kubernetes-group-version-kind" + ROUTE_META_ACTION = "x-kubernetes-action" +) + +type APIInstaller struct { + group *APIGroupVersion + prefix string // Path prefix where API resources are to be registered. + minRequestTimeout time.Duration +} + +// Struct capturing information about an action ("GET", "POST", "WATCH", "PROXY", etc). +type action struct { + Verb string // Verb identifying the action ("GET", "POST", "WATCH", "PROXY", etc). + Path string // The path of the action + Params []*restful.Parameter // List of parameters associated with the action. + Namer handlers.ScopeNamer + AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces +} + +// An interface to see if one storage supports override its default verb for monitoring +type StorageMetricsOverride interface { + // OverrideMetricsVerb gives a storage object an opportunity to override the verb reported to the metrics endpoint + OverrideMetricsVerb(oldVerb string) (newVerb string) +} + +// An interface to see if an object supports swagger documentation as a method +type documentable interface { + SwaggerDoc() map[string]string +} + +// toDiscoveryKubeVerb maps an action.Verb to the logical kube verb, used for discovery +var toDiscoveryKubeVerb = map[string]string{ + "CONNECT": "", // do not list in discovery. 
+ "DELETE": "delete", + "DELETECOLLECTION": "deletecollection", + "GET": "get", + "LIST": "list", + "PATCH": "patch", + "POST": "create", + "PROXY": "proxy", + "PUT": "update", + "WATCH": "watch", + "WATCHLIST": "watch", +} + +// Install handlers for API resources. +func (a *APIInstaller) Install() ([]metav1.APIResource, []*storageversion.ResourceInfo, *restful.WebService, []error) { + var apiResources []metav1.APIResource + var resourceInfos []*storageversion.ResourceInfo + var errors []error + ws := a.newWebService() + + // Register the paths in a deterministic (sorted) order to get a deterministic swagger spec. + paths := make([]string, len(a.group.Storage)) + var i int = 0 + for path := range a.group.Storage { + paths[i] = path + i++ + } + sort.Strings(paths) + for _, path := range paths { + apiResource, resourceInfo, err := a.registerResourceHandlers(path, a.group.Storage[path], ws) + if err != nil { + errors = append(errors, fmt.Errorf("error in registering resource: %s, %v", path, err)) + } + if apiResource != nil { + apiResources = append(apiResources, *apiResource) + } + if resourceInfo != nil { + resourceInfos = append(resourceInfos, resourceInfo) + } + } + return apiResources, resourceInfos, ws, errors +} + +// newWebService creates a new restful webservice with the api installer's prefix and version. +func (a *APIInstaller) newWebService() *restful.WebService { + ws := new(restful.WebService) + ws.Path(a.prefix) + // a.prefix contains "prefix/group/version" + ws.Doc("API at " + a.prefix) + // Backwards compatibility, we accepted objects with empty content-type at V1. + // If we stop using go-restful, we can default empty content-type to application/json on an + // endpoint by endpoint basis + ws.Consumes("*/*") + mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) + ws.Produces(append(mediaTypes, streamMediaTypes...)...) + ws.ApiVersion(a.group.GroupVersion.String()) + + return ws +} + +// calculate the storage gvk, the gvk objects are converted to before persisted to the etcd. +func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { + object := storage.New() + fqKinds, _, err := typer.ObjectKinds(object) + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds) + if !ok { + return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object)) + } + return gvk, nil +} + +// GetResourceKind returns the external group version kind registered for the given storage +// object. If the storage object is a subresource and has an override supplied for it, it returns +// the group version kind supplied in the override. +func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { + // Let the storage tell us exactly what GVK it has + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { + return gvkProvider.GroupVersionKind(groupVersion), nil + } + + object := storage.New() + fqKinds, _, err := typer.ObjectKinds(object) + if err != nil { + return schema.GroupVersionKind{}, err + } + + // a given go type can have multiple potential fully qualified kinds. 
Find the one that corresponds with the group + // we're trying to register here + fqKindToRegister := schema.GroupVersionKind{} + for _, fqKind := range fqKinds { + if fqKind.Group == groupVersion.Group { + fqKindToRegister = groupVersion.WithKind(fqKind.Kind) + break + } + } + if fqKindToRegister.Empty() { + return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion) + } + + // group is guaranteed to match based on the check above + return fqKindToRegister, nil +} + +func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storage, ws *restful.WebService) (*metav1.APIResource, *storageversion.ResourceInfo, error) { + admit := a.group.Admit + + optionsExternalVersion := a.group.GroupVersion + if a.group.OptionsExternalVersion != nil { + optionsExternalVersion = *a.group.OptionsExternalVersion + } + + resource, subresource, err := splitSubresource(path) + if err != nil { + return nil, nil, err + } + + group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version + + fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + + versionedPtr, err := a.group.Creater.New(fqKindToRegister) + if err != nil { + return nil, nil, err + } + defaultVersionedObject := indirectArbitraryPointer(versionedPtr) + kind := fqKindToRegister.Kind + isSubresource := len(subresource) > 0 + + // If there is a subresource, namespace scoping is defined by the parent resource + namespaceScoped := true + if isSubresource { + parentStorage, ok := a.group.Storage[resource] + if !ok { + return nil, nil, fmt.Errorf("missing parent storage: %q", resource) + } + scoper, ok := parentStorage.(rest.Scoper) + if !ok { + return nil, nil, fmt.Errorf("%q must implement scoper", resource) + } + namespaceScoped = scoper.NamespaceScoped() + + } else { + scoper, ok := storage.(rest.Scoper) + if !ok { + return nil, nil, fmt.Errorf("%q must implement scoper", resource) + } + namespaceScoped = scoper.NamespaceScoped() + } + + // what verbs are supported by the storage, used to know what verbs we support per path + creater, isCreater := storage.(rest.Creater) + namedCreater, isNamedCreater := storage.(rest.NamedCreater) + lister, isLister := storage.(rest.Lister) + getter, isGetter := storage.(rest.Getter) + getterWithOptions, isGetterWithOptions := storage.(rest.GetterWithOptions) + gracefulDeleter, isGracefulDeleter := storage.(rest.GracefulDeleter) + collectionDeleter, isCollectionDeleter := storage.(rest.CollectionDeleter) + updater, isUpdater := storage.(rest.Updater) + patcher, isPatcher := storage.(rest.Patcher) + watcher, isWatcher := storage.(rest.Watcher) + connecter, isConnecter := storage.(rest.Connecter) + storageMeta, isMetadata := storage.(rest.StorageMetadata) + storageVersionProvider, isStorageVersionProvider := storage.(rest.StorageVersionProvider) + if !isMetadata { + storageMeta = defaultStorageMetadata{} + } + exporter, isExporter := storage.(rest.Exporter) + if !isExporter { + exporter = nil + } + + versionedExportOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ExportOptions")) + if err != nil { + return nil, nil, err + } + + if isNamedCreater { + isCreater = true + } + + var versionedList interface{} + if isLister { + list := lister.NewList() + listGVKs, _, err := a.group.Typer.ObjectKinds(list) + if err != nil { + return nil, nil, err + } + versionedListPtr, err := 
a.group.Creater.New(a.group.GroupVersion.WithKind(listGVKs[0].Kind)) + if err != nil { + return nil, nil, err + } + versionedList = indirectArbitraryPointer(versionedListPtr) + } + + versionedListOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ListOptions")) + if err != nil { + return nil, nil, err + } + versionedCreateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("CreateOptions")) + if err != nil { + return nil, nil, err + } + versionedPatchOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("PatchOptions")) + if err != nil { + return nil, nil, err + } + versionedUpdateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("UpdateOptions")) + if err != nil { + return nil, nil, err + } + + var versionedDeleteOptions runtime.Object + var versionedDeleterObject interface{} + deleteReturnsDeletedObject := false + if isGracefulDeleter { + versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions")) + if err != nil { + return nil, nil, err + } + versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions) + + if mayReturnFullObjectDeleter, ok := storage.(rest.MayReturnFullObjectDeleter); ok { + deleteReturnsDeletedObject = mayReturnFullObjectDeleter.DeleteReturnsDeletedObject() + } + } + + versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status")) + if err != nil { + return nil, nil, err + } + versionedStatus := indirectArbitraryPointer(versionedStatusPtr) + var ( + getOptions runtime.Object + versionedGetOptions runtime.Object + getOptionsInternalKind schema.GroupVersionKind + getSubpath bool + ) + if isGetterWithOptions { + getOptions, getSubpath, _ = getterWithOptions.NewGetOptions() + getOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(getOptions) + if err != nil { + return nil, nil, err + } + getOptionsInternalKind = getOptionsInternalKinds[0] + versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind)) + if err != nil { + versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind)) + if err != nil { + return nil, nil, err + } + } + isGetter = true + } + + var versionedWatchEvent interface{} + if isWatcher { + versionedWatchEventPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind("WatchEvent")) + if err != nil { + return nil, nil, err + } + versionedWatchEvent = indirectArbitraryPointer(versionedWatchEventPtr) + } + + var ( + connectOptions runtime.Object + versionedConnectOptions runtime.Object + connectOptionsInternalKind schema.GroupVersionKind + connectSubpath bool + ) + if isConnecter { + connectOptions, connectSubpath, _ = connecter.NewConnectOptions() + if connectOptions != nil { + connectOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(connectOptions) + if err != nil { + return nil, nil, err + } + + connectOptionsInternalKind = connectOptionsInternalKinds[0] + versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind)) + if err != nil { + versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind)) + if err != nil { + return nil, nil, err + } + } + } + } + + allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list. 
+ nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string") + pathParam := ws.PathParameter("path", "path to the resource").DataType("string") + + params := []*restful.Parameter{} + actions := []action{} + + var resourceKind string + kindProvider, ok := storage.(rest.KindProvider) + if ok { + resourceKind = kindProvider.Kind() + } else { + resourceKind = kind + } + + tableProvider, isTableProvider := storage.(rest.TableConvertor) + if isLister && !isTableProvider { + // All listers must implement TableProvider + return nil, nil, fmt.Errorf("%q must implement TableConvertor", resource) + } + + var apiResource metav1.APIResource + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && + isStorageVersionProvider && + storageVersionProvider.StorageVersion() != nil { + versioner := storageVersionProvider.StorageVersion() + gvk, err := getStorageVersionKind(versioner, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind) + } + + // Get the list of actions for the given scope. + switch { + case !namespaceScoped: + // Handle non-namespace scoped resources like nodes. + resourcePath := resource + resourceParams := params + itemPath := resourcePath + "/{name}" + nameParams := append(params, nameParam) + proxyParams := append(nameParams, pathParam) + suffix := "" + if isSubresource { + suffix = "/" + subresource + itemPath = itemPath + suffix + resourcePath = itemPath + resourceParams = nameParams + } + apiResource.Name = path + apiResource.Namespaced = false + apiResource.Kind = resourceKind + namer := handlers.ContextBasedNaming{ + SelfLinker: a.group.Linker, + ClusterScoped: true, + SelfLinkPathPrefix: gpath.Join(a.prefix, resource) + "/", + SelfLinkPathSuffix: suffix, + } + + // Handler for standard REST verbs (GET, PUT, POST and DELETE). + // Add actions at the resource path: /api/apiVersion/resource + actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister) + actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater) + actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList) + + // Add actions at the item path: /api/apiVersion/resource/{name} + actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter) + if getSubpath { + actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter) + } + actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater) + actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher) + actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher) + actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter) + actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath) + default: + namespaceParamName := "namespaces" + // Handler for standard REST verbs (GET, PUT, POST and DELETE). 
+		namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string")
+		namespacedPath := namespaceParamName + "/{namespace}/" + resource
+		namespaceParams := []*restful.Parameter{namespaceParam}
+
+		resourcePath := namespacedPath
+		resourceParams := namespaceParams
+		itemPath := namespacedPath + "/{name}"
+		nameParams := append(namespaceParams, nameParam)
+		proxyParams := append(nameParams, pathParam)
+		itemPathSuffix := ""
+		if isSubresource {
+			itemPathSuffix = "/" + subresource
+			itemPath = itemPath + itemPathSuffix
+			resourcePath = itemPath
+			resourceParams = nameParams
+		}
+		apiResource.Name = path
+		apiResource.Namespaced = true
+		apiResource.Kind = resourceKind
+		namer := handlers.ContextBasedNaming{
+			SelfLinker:         a.group.Linker,
+			ClusterScoped:      false,
+			SelfLinkPathPrefix: gpath.Join(a.prefix, namespaceParamName) + "/",
+			SelfLinkPathSuffix: itemPathSuffix,
+		}
+
+		actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister)
+		actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater)
+		actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter)
+		// DEPRECATED in 1.11
+		actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList)
+
+		actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter)
+		if getSubpath {
+			actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter)
+		}
+		actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater)
+		actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher)
+		actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter)
+		// DEPRECATED in 1.11
+		actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher)
+		actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter)
+		actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath)
+
+		// list or post across namespace.
+		// For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods.
+ // TODO: more strongly type whether a resource allows these actions on "all namespaces" (bulk delete) + if !isSubresource { + actions = appendIf(actions, action{"LIST", resource, params, namer, true}, isLister) + // DEPRECATED in 1.11 + actions = appendIf(actions, action{"WATCHLIST", "watch/" + resource, params, namer, true}, allowWatchList) + } + } + + var resourceInfo *storageversion.ResourceInfo + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) && + utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) && + isStorageVersionProvider && + storageVersionProvider.StorageVersion() != nil { + + versioner := storageVersionProvider.StorageVersion() + encodingGVK, err := getStorageVersionKind(versioner, storage, a.group.Typer) + if err != nil { + return nil, nil, err + } + resourceInfo = &storageversion.ResourceInfo{ + GroupResource: schema.GroupResource{ + Group: a.group.GroupVersion.Group, + Resource: apiResource.Name, + }, + EncodingVersion: encodingGVK.GroupVersion().String(), + // We record EquivalentResourceMapper first instead of calculate + // DecodableVersions immediately because API installation must + // be completed first for us to know equivalent APIs + EquivalentResourceMapper: a.group.EquivalentResourceRegistry, + } + } + + // Create Routes for the actions. + // TODO: Add status documentation using Returns() + // Errors (see api/errors/errors.go as well as go-restful router): + // http.StatusNotFound, http.StatusMethodNotAllowed, + // http.StatusUnsupportedMediaType, http.StatusNotAcceptable, + // http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden, + // http.StatusRequestTimeout, http.StatusConflict, http.StatusPreconditionFailed, + // http.StatusUnprocessableEntity, http.StatusInternalServerError, + // http.StatusServiceUnavailable + // and api error codes + // Note that if we specify a versioned Status object here, we may need to + // create one for the tests, also + // Success: + // http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent + // + // test/integration/auth_test.go is currently the most comprehensive status code test + + for _, s := range a.group.Serializer.SupportedMediaTypes() { + if len(s.MediaTypeSubType) == 0 || len(s.MediaTypeType) == 0 { + return nil, nil, fmt.Errorf("all serializers in the group Serializer must have MediaTypeType and MediaTypeSubType set: %s", s.MediaType) + } + } + mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) + allMediaTypes := append(mediaTypes, streamMediaTypes...) + ws.Produces(allMediaTypes...) + + kubeVerbs := map[string]struct{}{} + reqScope := handlers.RequestScope{ + Serializer: a.group.Serializer, + ParameterCodec: a.group.ParameterCodec, + Creater: a.group.Creater, + Convertor: a.group.Convertor, + Defaulter: a.group.Defaulter, + Typer: a.group.Typer, + UnsafeConvertor: a.group.UnsafeConvertor, + Authorizer: a.group.Authorizer, + + EquivalentResourceMapper: a.group.EquivalentResourceRegistry, + + // TODO: Check for the interface on storage + TableConvertor: tableProvider, + + // TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this. 
+ Resource: a.group.GroupVersion.WithResource(resource), + Subresource: subresource, + Kind: fqKindToRegister, + + HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, + + MetaGroupVersion: metav1.SchemeGroupVersion, + + MaxRequestBodyBytes: a.group.MaxRequestBodyBytes, + } + if a.group.MetaGroupVersion != nil { + reqScope.MetaGroupVersion = *a.group.MetaGroupVersion + } + if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + reqScope.FieldManager, err = fieldmanager.NewDefaultFieldManager( + a.group.TypeConverter, + a.group.UnsafeConvertor, + a.group.Defaulter, + a.group.Creater, + fqKindToRegister, + reqScope.HubGroupVersion, + isSubresource, + ) + if err != nil { + return nil, nil, fmt.Errorf("failed to create field manager: %v", err) + } + } + for _, action := range actions { + producedObject := storageMeta.ProducesObject(action.Verb) + if producedObject == nil { + producedObject = defaultVersionedObject + } + reqScope.Namer = action.Namer + + requestScope := "cluster" + var namespaced string + var operationSuffix string + if apiResource.Namespaced { + requestScope = "namespace" + namespaced = "Namespaced" + } + if strings.HasSuffix(action.Path, "/{path:*}") { + requestScope = "resource" + operationSuffix = operationSuffix + "WithPath" + } + if strings.Index(action.Path, "/{name}") != -1 || action.Verb == "POST" { + requestScope = "resource" + } + if action.AllNamespaces { + requestScope = "cluster" + operationSuffix = operationSuffix + "ForAllNamespaces" + namespaced = "" + } + + if kubeVerb, found := toDiscoveryKubeVerb[action.Verb]; found { + if len(kubeVerb) != 0 { + kubeVerbs[kubeVerb] = struct{}{} + } + } else { + return nil, nil, fmt.Errorf("unknown action verb for discovery: %s", action.Verb) + } + + routes := []*restful.RouteBuilder{} + + // If there is a subresource, kind should be the parent's kind. + if isSubresource { + parentStorage, ok := a.group.Storage[resource] + if !ok { + return nil, nil, fmt.Errorf("missing parent storage: %q", resource) + } + + fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer) + if err != nil { + return nil, nil, err + } + kind = fqParentKind.Kind + } + + verbOverrider, needOverride := storage.(StorageMetricsOverride) + + // accumulate endpoint-level warnings + var ( + enableWarningHeaders = utilfeature.DefaultFeatureGate.Enabled(features.WarningHeaders) + + warnings []string + deprecated bool + removedRelease string + ) + + { + versionedPtrWithGVK := versionedPtr.DeepCopyObject() + versionedPtrWithGVK.GetObjectKind().SetGroupVersionKind(fqKindToRegister) + currentMajor, currentMinor, _ := deprecation.MajorMinor(versioninfo.Get()) + deprecated = deprecation.IsDeprecated(versionedPtrWithGVK, currentMajor, currentMinor) + if deprecated { + removedRelease = deprecation.RemovedRelease(versionedPtrWithGVK) + warnings = append(warnings, deprecation.WarningMessage(versionedPtrWithGVK)) + } + } + + switch action.Verb { + case "GET": // Get a resource. 
+ var handler restful.RouteFunction + if isGetterWithOptions { + handler = restfulGetResourceWithOptions(getterWithOptions, reqScope, isSubresource) + } else { + handler = restfulGetResource(getter, exporter, reqScope) + } + + if needOverride { + // need change the reported verb + handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler) + } else { + handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler) + } + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + + doc := "read the specified " + kind + if isSubresource { + doc = "read " + subresource + " of the specified " + kind + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + Writes(producedObject) + if isGetterWithOptions { + if err := AddObjectParams(ws, route, versionedGetOptions); err != nil { + return nil, nil, err + } + } + if isExporter { + if err := AddObjectParams(ws, route, versionedExportOptions); err != nil { + return nil, nil, err + } + } + addParams(route, action.Params) + routes = append(routes, route) + case "LIST": // List all resources of a kind. + doc := "list objects of kind " + kind + if isSubresource { + doc = "list " + subresource + " of objects of kind " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...). + Returns(http.StatusOK, "OK", versionedList). + Writes(versionedList) + if err := AddObjectParams(ws, route, versionedListOptions); err != nil { + return nil, nil, err + } + switch { + case isLister && isWatcher: + doc := "list or watch objects of kind " + kind + if isSubresource { + doc = "list or watch " + subresource + " of objects of kind " + kind + } + route.Doc(doc) + case isWatcher: + doc := "watch objects of kind " + kind + if isSubresource { + doc = "watch " + subresource + "of objects of kind " + kind + } + route.Doc(doc) + } + addParams(route, action.Params) + routes = append(routes, route) + case "PUT": // Update a resource. + doc := "replace the specified " + kind + if isSubresource { + doc = "replace " + subresource + " of the specified " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulUpdateResource(updater, reqScope, admit)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.PUT(action.Path).To(handler). + Doc(doc). 
+ Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) + if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "PATCH": // Partially update a resource + doc := "partially update the specified " + kind + if isSubresource { + doc = "partially update " + subresource + " of the specified " + kind + } + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), + } + if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + supportedTypes = append(supportedTypes, string(types.ApplyPatchType)) + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.PATCH(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Consumes(supportedTypes...). + Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + Reads(metav1.Patch{}). + Writes(producedObject) + if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "POST": // Create a resource. + var handler restful.RouteFunction + if isNamedCreater { + handler = restfulCreateNamedResource(namedCreater, reqScope, admit) + } else { + handler = restfulCreateResource(creater, reqScope, admit) + } + handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, handler) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + article := GetArticleForNoun(kind, " ") + doc := "create" + article + kind + if isSubresource { + doc = "create " + subresource + " of" + article + kind + } + route := ws.POST(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Returns(http.StatusAccepted, "Accepted", producedObject). + Reads(defaultVersionedObject). 
+ Writes(producedObject) + if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "DELETE": // Delete a resource. + article := GetArticleForNoun(kind, " ") + doc := "delete" + article + kind + if isSubresource { + doc = "delete " + subresource + " of" + article + kind + } + deleteReturnType := versionedStatus + if deleteReturnsDeletedObject { + deleteReturnType = producedObject + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.DELETE(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Writes(deleteReturnType). + Returns(http.StatusOK, "OK", deleteReturnType). + Returns(http.StatusAccepted, "Accepted", deleteReturnType) + if isGracefulDeleter { + route.Reads(versionedDeleterObject) + route.ParameterNamed("body").Required(false) + if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { + return nil, nil, err + } + } + addParams(route, action.Params) + routes = append(routes, route) + case "DELETECOLLECTION": + doc := "delete collection of " + kind + if isSubresource { + doc = "delete collection of " + subresource + " of a " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.DELETE(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). + Writes(versionedStatus). + Returns(http.StatusOK, "OK", versionedStatus) + if isCollectionDeleter { + route.Reads(versionedDeleterObject) + route.ParameterNamed("body").Required(false) + if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { + return nil, nil, err + } + } + if err := AddObjectParams(ws, route, versionedListOptions, "watch", "allowWatchBookmarks"); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + // deprecated in 1.11 + case "WATCH": // Watch a resource. + doc := "watch changes to an object of kind " + kind + if isSubresource { + doc = "watch changes to " + subresource + " of an object of kind " + kind + } + doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter." 
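The deprecation text above steers clients toward watch-via-list. A rough client-go sketch of that replacement; the in-cluster config, namespace, and pod name are illustrative assumptions, not part of this patch.

	package main

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
	)

	func main() {
		cfg, err := rest.InClusterConfig() // any *rest.Config would do
		if err != nil {
			panic(err)
		}
		client := kubernetes.NewForConfigOrDie(cfg)

		// Roughly GET .../pods?watch=true&fieldSelector=metadata.name=my-pod,
		// instead of the deprecated watch/ endpoints registered here.
		w, err := client.CoreV1().Pods("default").Watch(context.TODO(), metav1.ListOptions{
			FieldSelector: "metadata.name=my-pod",
		})
		if err != nil {
			panic(err)
		}
		defer w.Stop()
		for ev := range w.ResultChan() {
			fmt.Println(ev.Type)
		}
	}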
+ handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). + Produces(allMediaTypes...). + Returns(http.StatusOK, "OK", versionedWatchEvent). + Writes(versionedWatchEvent) + if err := AddObjectParams(ws, route, versionedListOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + // deprecated in 1.11 + case "WATCHLIST": // Watch all resources of a kind. + doc := "watch individual changes to a list of " + kind + if isSubresource { + doc = "watch individual changes to a list of " + subresource + " of " + kind + } + doc += ". deprecated: use the 'watch' parameter with a list operation instead." + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.GET(action.Path).To(handler). + Doc(doc). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). + Produces(allMediaTypes...). + Returns(http.StatusOK, "OK", versionedWatchEvent). + Writes(versionedWatchEvent) + if err := AddObjectParams(ws, route, versionedListOptions); err != nil { + return nil, nil, err + } + addParams(route, action.Params) + routes = append(routes, route) + case "CONNECT": + for _, method := range connecter.ConnectMethods() { + connectProducedObject := storageMeta.ProducesObject(method) + if connectProducedObject == nil { + connectProducedObject = "string" + } + doc := "connect " + method + " requests to " + kind + if isSubresource { + doc = "connect " + method + " requests to " + subresource + " of " + kind + } + handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulConnectResource(connecter, reqScope, admit, path, isSubresource)) + if enableWarningHeaders { + handler = utilwarning.AddWarningsHandler(handler, warnings) + } + route := ws.Method(method).Path(action.Path). + To(handler). + Doc(doc). + Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). + Produces("*/*"). + Consumes("*/*"). 
+ Writes(connectProducedObject) + if versionedConnectOptions != nil { + if err := AddObjectParams(ws, route, versionedConnectOptions); err != nil { + return nil, nil, err + } + } + addParams(route, action.Params) + routes = append(routes, route) + + // transform ConnectMethods to kube verbs + if kubeVerb, found := toDiscoveryKubeVerb[method]; found { + if len(kubeVerb) != 0 { + kubeVerbs[kubeVerb] = struct{}{} + } + } + } + default: + return nil, nil, fmt.Errorf("unrecognized action verb: %s", action.Verb) + } + for _, route := range routes { + route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{ + Group: reqScope.Kind.Group, + Version: reqScope.Kind.Version, + Kind: reqScope.Kind.Kind, + }) + route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb)) + ws.Route(route) + } + // Note: update GetAuthorizerAttributes() when adding a custom handler. + } + + apiResource.Verbs = make([]string, 0, len(kubeVerbs)) + for kubeVerb := range kubeVerbs { + apiResource.Verbs = append(apiResource.Verbs, kubeVerb) + } + sort.Strings(apiResource.Verbs) + + if shortNamesProvider, ok := storage.(rest.ShortNamesProvider); ok { + apiResource.ShortNames = shortNamesProvider.ShortNames() + } + if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok { + apiResource.Categories = categoriesProvider.Categories() + } + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { + gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion) + apiResource.Group = gvk.Group + apiResource.Version = gvk.Version + apiResource.Kind = gvk.Kind + } + + // Record the existence of the GVR and the corresponding GVK + a.group.EquivalentResourceRegistry.RegisterKindFor(reqScope.Resource, reqScope.Subresource, fqKindToRegister) + + return &apiResource, resourceInfo, nil +} + +// indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer +func indirectArbitraryPointer(ptrToObject interface{}) interface{} { + return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface() +} + +func appendIf(actions []action, a action, shouldAppend bool) []action { + if shouldAppend { + actions = append(actions, a) + } + return actions +} + +func addParams(route *restful.RouteBuilder, params []*restful.Parameter) { + for _, param := range params { + route.Param(param) + } +} + +// AddObjectParams converts a runtime.Object into a set of go-restful Param() definitions on the route. +// The object must be a pointer to a struct; only fields at the top level of the struct that are not +// themselves interfaces or structs are used; only fields with a json tag that is non empty (the standard +// Go JSON behavior for omitting a field) become query parameters. The name of the query parameter is +// the JSON field name. If a description struct tag is set on the field, that description is used on the +// query parameter. In essence, it converts a standard JSON top level object into a query param schema. +func AddObjectParams(ws *restful.WebService, route *restful.RouteBuilder, obj interface{}, excludedNames ...string) error { + sv, err := conversion.EnforcePtr(obj) + if err != nil { + return err + } + st := sv.Type() + excludedNameSet := sets.NewString(excludedNames...) + switch st.Kind() { + case reflect.Struct: + for i := 0; i < st.NumField(); i++ { + name := st.Field(i).Name + sf, ok := st.FieldByName(name) + if !ok { + continue + } + switch sf.Type.Kind() { + case reflect.Interface, reflect.Struct: + case reflect.Ptr: + // TODO: This is a hack to let metav1.Time through. 
This needs to be fixed in a more generic way eventually. bug #36191 + if (sf.Type.Elem().Kind() == reflect.Interface || sf.Type.Elem().Kind() == reflect.Struct) && strings.TrimPrefix(sf.Type.String(), "*") != "metav1.Time" { + continue + } + fallthrough + default: + jsonTag := sf.Tag.Get("json") + if len(jsonTag) == 0 { + continue + } + jsonName := strings.SplitN(jsonTag, ",", 2)[0] + if len(jsonName) == 0 { + continue + } + if excludedNameSet.Has(jsonName) { + continue + } + var desc string + if docable, ok := obj.(documentable); ok { + desc = docable.SwaggerDoc()[jsonName] + } + route.Param(ws.QueryParameter(jsonName, desc).DataType(typeToJSON(sf.Type.String()))) + } + } + } + return nil +} + +// TODO: this is incomplete, expand as needed. +// Convert the name of a golang type to the name of a JSON type +func typeToJSON(typeName string) string { + switch typeName { + case "bool", "*bool": + return "boolean" + case "uint8", "*uint8", "int", "*int", "int32", "*int32", "int64", "*int64", "uint32", "*uint32", "uint64", "*uint64": + return "integer" + case "float64", "*float64", "float32", "*float32": + return "number" + case "metav1.Time", "*metav1.Time": + return "string" + case "byte", "*byte": + return "string" + case "v1.DeletionPropagation", "*v1.DeletionPropagation": + return "string" + case "v1.ResourceVersionMatch", "*v1.ResourceVersionMatch": + return "string" + case "v1.IncludeObjectPolicy", "*v1.IncludeObjectPolicy": + return "string" + + // TODO: Fix these when go-restful supports a way to specify an array query param: + // https://github.com/emicklei/go-restful/issues/225 + case "[]string", "[]*string": + return "string" + case "[]int32", "[]*int32": + return "integer" + + default: + return typeName + } +} + +// defaultStorageMetadata provides default answers to rest.StorageMetadata. +type defaultStorageMetadata struct{} + +// defaultStorageMetadata implements rest.StorageMetadata +var _ rest.StorageMetadata = defaultStorageMetadata{} + +func (defaultStorageMetadata) ProducesMIMETypes(verb string) []string { + return nil +} + +func (defaultStorageMetadata) ProducesObject(verb string) interface{} { + return nil +} + +// splitSubresource checks if the given storage path is the path of a subresource and returns +// the resource and subresource components. +func splitSubresource(path string) (string, string, error) { + var resource, subresource string + switch parts := strings.Split(path, "/"); len(parts) { + case 2: + resource, subresource = parts[0], parts[1] + case 1: + resource = parts[0] + default: + // TODO: support deeper paths + return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)") + } + return resource, subresource, nil +} + +// GetArticleForNoun returns the article needed for the given noun. +func GetArticleForNoun(noun string, padding string) string { + if !strings.HasSuffix(noun, "ss") && strings.HasSuffix(noun, "s") { + // Plurals don't have an article. + // Don't catch words like class + return fmt.Sprintf("%v", padding) + } + + article := "a" + if isVowel(rune(noun[0])) { + article = "an" + } + + return fmt.Sprintf("%s%s%s", padding, article, padding) +} + +// isVowel returns true if the rune is a vowel (case insensitive). 
+func isVowel(c rune) bool { + vowels := []rune{'a', 'e', 'i', 'o', 'u'} + for _, value := range vowels { + if value == unicode.ToLower(c) { + return true + } + } + return false +} + +func restfulListResource(r rest.Lister, rw rest.Watcher, scope handlers.RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.ListResource(r, rw, &scope, forceWatch, minRequestTimeout)(res.ResponseWriter, req.Request) + } +} + +func restfulCreateNamedResource(r rest.NamedCreater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.CreateNamedResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulCreateResource(r rest.Creater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.CreateResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulDeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.DeleteResource(r, allowsOptions, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulDeleteCollection(r rest.CollectionDeleter, checkBody bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.DeleteCollection(r, checkBody, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.UpdateResource(r, &scope, admit)(res.ResponseWriter, req.Request) + } +} + +func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, supportedTypes []string) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.PatchResource(r, &scope, admit, supportedTypes)(res.ResponseWriter, req.Request) + } +} + +func restfulGetResource(r rest.Getter, e rest.Exporter, scope handlers.RequestScope) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.GetResource(r, e, &scope)(res.ResponseWriter, req.Request) + } +} + +func restfulGetResourceWithOptions(r rest.GetterWithOptions, scope handlers.RequestScope, isSubresource bool) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.GetResourceWithOptions(r, &scope, isSubresource)(res.ResponseWriter, req.Request) + } +} + +func restfulConnectResource(connecter rest.Connecter, scope handlers.RequestScope, admit admission.Interface, restPath string, isSubresource bool) restful.RouteFunction { + return func(req *restful.Request, res *restful.Response) { + handlers.ConnectResource(connecter, &scope, admit, restPath, isSubresource)(res.ResponseWriter, req.Request) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 8d042dc51..d4f6068b4 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/types" utilsets 
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -185,6 +186,36 @@ var ( }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"}, ) + + apiSelfRequestCounter = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_selfrequest_total", + Help: "Counter of apiserver self-requests broken out for each verb, API resource and subresource.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "resource", "subresource"}, + ) + + requestFilterDuration = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_request_filter_duration_seconds", + Help: "Request filter latency distribution in seconds, for each filter type", + Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"filter"}, + ) + + // requestAbortsTotal is a number of aborted requests with http.ErrAbortHandler + requestAbortsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_request_aborts_total", + Help: "Number of requests which apiserver aborted possibly due to a timeout, for each group, version, verb, resource, subresource and scope", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope"}, + ) + kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) metrics = []resettableCollector{ @@ -201,6 +232,9 @@ var ( currentInflightRequests, currentInqueueRequests, requestTerminationsTotal, + apiSelfRequestCounter, + requestFilterDuration, + requestAbortsTotal, } // these are the known (e.g. whitelisted/known) content types which we will report for @@ -290,6 +324,26 @@ func UpdateInflightRequestMetrics(phase string, nonmutating, mutating int) { } } +func RecordFilterLatency(name string, elapsed time.Duration) { + requestFilterDuration.WithLabelValues(name).Observe(elapsed.Seconds()) +} + +// RecordRequestAbort records that the request was aborted possibly due to a timeout. +func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } + + scope := CleanScope(requestInfo) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + resource := requestInfo.Resource + subresource := requestInfo.Subresource + group := requestInfo.APIGroup + version := requestInfo.APIVersion + + requestAbortsTotal.WithLabelValues(reportedVerb, group, version, resource, subresource, scope).Inc() +} + // RecordRequestTermination records that the request was terminated early as part of a resource // preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling, // proxyHandler errors). RecordRequestTermination should only be called zero or one times @@ -299,20 +353,17 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } scope := CleanScope(requestInfo) - // We don't use verb from , as for the healthy path - // MonitorRequest is called from InstrumentRouteFunc which is registered - // in installer.go with predefined list of verbs (different than those - // translated to RequestInfo). 
+ + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - verb := canonicalVerb(strings.ToUpper(req.Method), scope) - // set verbs to a bounded set of known and expected verbs - if !validRequestMethods.Has(verb) { - verb = OtherRequestMethod - } + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + if requestInfo.IsResourceRequest { - requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() + requestTerminationsTotal.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() } else { - requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc() + requestTerminationsTotal.WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc() } } @@ -324,12 +375,13 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp } var g compbasemetrics.GaugeMetric scope := CleanScope(requestInfo) - // We don't use verb from , as for the healthy path - // MonitorRequest is called from InstrumentRouteFunc which is registered - // in installer.go with predefined list of verbs (different than those - // translated to RequestInfo). + + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + if requestInfo.IsResourceRequest { g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) } else { @@ -343,11 +395,21 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, contentType string, httpCode, respSize int, elapsed time.Duration) { - reportedVerb := cleanVerb(verb, req) + // We don't use verb from , as this may be propagated from + // InstrumentRouteFunc which is registered in installer.go with predefined + // list of verbs (different than those translated to RequestInfo). + // However, we need to tweak it e.g. to differentiate GET from LIST. 
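+	// For example, canonicalVerb (defined further down in this file) reports a
+	// GET served at cluster or namespace scope as LIST:
+	//   canonicalVerb("GET", "cluster")  -> "LIST"
+	//   canonicalVerb("GET", "resource") -> "GET"
+	//   canonicalVerb("GET", "")         -> "GET"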
+ reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() cleanContentType := cleanContentType(contentType) requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, cleanContentType, codeToString(httpCode)).Inc() + // MonitorRequest happens after authentication, so we can trust the username given by the request + info, ok := request.UserFrom(req.Context()) + if ok && info.GetName() == user.APIServerUser { + apiSelfRequestCounter.WithLabelValues(reportedVerb, resource, subresource).Inc() + } if deprecated { deprecatedRequestGauge.WithLabelValues(group, version, resource, subresource, removedRelease).Set(1) audit.AddAuditAnnotation(req.Context(), deprecatedAnnotationKey, "true") @@ -365,8 +427,11 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour // InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps // the go-restful RouteFunction instead of a HandlerFunc plus some Kubernetes endpoint specific information. func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, routeFunc restful.RouteFunction) restful.RouteFunction { - return restful.RouteFunction(func(request *restful.Request, response *restful.Response) { - now := time.Now() + return restful.RouteFunction(func(req *restful.Request, response *restful.Response) { + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Request.Context()) + if !ok { + requestReceivedTimestamp = time.Now() + } delegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter} @@ -381,16 +446,19 @@ func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, com } response.ResponseWriter = rw - routeFunc(request, response) + routeFunc(req, response) - MonitorRequest(request.Request, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) + MonitorRequest(req.Request, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp)) }) } // InstrumentHandlerFunc works like Prometheus' InstrumentHandlerFunc but adds some Kubernetes endpoint specific information. 
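// With this change the reported latency is measured from the request's ReceivedTimestamp
// when an earlier filter stored one via request.WithReceivedTimestamp, and falls back to
// time.Now() at handler entry otherwise.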
func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - now := time.Now() + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Context()) + if !ok { + requestReceivedTimestamp = time.Now() + } delegate := &ResponseWriterDelegator{ResponseWriter: w} @@ -405,7 +473,7 @@ func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, c handler(w, req) - MonitorRequest(req, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) + MonitorRequest(req, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp)) } } @@ -440,7 +508,7 @@ func CleanScope(requestInfo *request.RequestInfo) string { func canonicalVerb(verb string, scope string) string { switch verb { case "GET", "HEAD": - if scope != "resource" { + if scope != "resource" && scope != "" { return "LIST" } return "GET" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS new file mode 100644 index 000000000..006f0125e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- mbohlool diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go new file mode 100644 index 000000000..e3bd028bb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go @@ -0,0 +1,191 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "unicode" + + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util" +) + +var verbs = util.NewTrie([]string{"get", "log", "read", "replace", "patch", "delete", "deletecollection", "watch", "connect", "proxy", "list", "create", "patch"}) + +const ( + extensionGVK = "x-kubernetes-group-version-kind" +) + +// ToValidOperationID makes an string a valid op ID (e.g. 
removing punctuations and whitespaces and make it camel case) +func ToValidOperationID(s string, capitalizeFirstLetter bool) string { + var buffer bytes.Buffer + capitalize := capitalizeFirstLetter + for i, r := range s { + if unicode.IsLetter(r) || r == '_' || (i != 0 && unicode.IsDigit(r)) { + if capitalize { + buffer.WriteRune(unicode.ToUpper(r)) + capitalize = false + } else { + buffer.WriteRune(r) + } + } else { + capitalize = true + } + } + return buffer.String() +} + +// GetOperationIDAndTags returns a customize operation ID and a list of tags for kubernetes API server's OpenAPI spec to prevent duplicate IDs. +func GetOperationIDAndTags(r *restful.Route) (string, []string, error) { + op := r.Operation + path := r.Path + var tags []string + prefix, exists := verbs.GetPrefix(op) + if !exists { + return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op) + } + op = op[len(prefix):] + parts := strings.Split(strings.Trim(path, "/"), "/") + // Assume /api is /apis/core, remove this when we actually server /api/... on /apis/core/... + if len(parts) >= 1 && parts[0] == "api" { + parts = append([]string{"apis", "core"}, parts[1:]...) + } + if len(parts) >= 2 && parts[0] == "apis" { + trimmed := strings.TrimSuffix(parts[1], ".k8s.io") + prefix = prefix + ToValidOperationID(trimmed, prefix != "") + tag := ToValidOperationID(trimmed, false) + if len(parts) > 2 { + prefix = prefix + ToValidOperationID(parts[2], prefix != "") + tag = tag + "_" + ToValidOperationID(parts[2], false) + } + tags = append(tags, tag) + } else if len(parts) >= 1 { + tags = append(tags, ToValidOperationID(parts[0], false)) + } + return prefix + ToValidOperationID(op, prefix != ""), tags, nil +} + +type groupVersionKinds []v1.GroupVersionKind + +func (s groupVersionKinds) Len() int { + return len(s) +} + +func (s groupVersionKinds) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s groupVersionKinds) Less(i, j int) bool { + if s[i].Group == s[j].Group { + if s[i].Version == s[j].Version { + return s[i].Kind < s[j].Kind + } + return s[i].Version < s[j].Version + } + return s[i].Group < s[j].Group +} + +func (s groupVersionKinds) JSON() []interface{} { + j := []interface{}{} + for _, gvk := range s { + j = append(j, map[string]interface{}{ + "group": gvk.Group, + "version": gvk.Version, + "kind": gvk.Kind, + }) + } + return j +} + +// DefinitionNamer is the type to customize OpenAPI definition name. +type DefinitionNamer struct { + typeGroupVersionKinds map[string]groupVersionKinds +} + +func gvkConvert(gvk schema.GroupVersionKind) v1.GroupVersionKind { + return v1.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind, + } +} + +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +func typeName(t reflect.Type) string { + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return fmt.Sprintf("%s.%s", path, t.Name()) +} + +// NewDefinitionNamer constructs a new DefinitionNamer to be used to customize OpenAPI spec. 
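+// It indexes every type registered in the supplied schemes by Go type name and records
+// the sorted GroupVersionKinds for each; GetDefinitionName later uses that index to
+// return a friendly definition name together with the x-kubernetes-group-version-kind
+// extension.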
+func NewDefinitionNamer(schemes ...*runtime.Scheme) *DefinitionNamer { + ret := &DefinitionNamer{ + typeGroupVersionKinds: map[string]groupVersionKinds{}, + } + for _, s := range schemes { + for gvk, rtype := range s.AllKnownTypes() { + newGVK := gvkConvert(gvk) + exists := false + for _, existingGVK := range ret.typeGroupVersionKinds[typeName(rtype)] { + if newGVK == existingGVK { + exists = true + break + } + } + if !exists { + ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], newGVK) + } + } + } + for _, gvk := range ret.typeGroupVersionKinds { + sort.Sort(gvk) + } + return ret +} + +// GetDefinitionName returns the name and tags for a given definition +func (d *DefinitionNamer) GetDefinitionName(name string) (string, spec.Extensions) { + if groupVersionKinds, ok := d.typeGroupVersionKinds[name]; ok { + return friendlyName(name), spec.Extensions{ + extensionGVK: groupVersionKinds.JSON(), + } + } + return friendlyName(name), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go new file mode 100644 index 000000000..7d58cf3ad --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "time" +) + +type requestReceivedTimestampKeyType int + +// requestReceivedTimestampKey is the ReceivedTimestamp (the time the request reached the apiserver) +// key for the context. +const requestReceivedTimestampKey requestReceivedTimestampKeyType = iota + +// WithReceivedTimestamp returns a copy of parent context in which the ReceivedTimestamp +// (the time the request reached the apiserver) is set. +// +// If the specified ReceivedTimestamp is zero, no value is set and the parent context is returned as is. +func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context { + if receivedTimestamp.IsZero() { + return parent + } + return WithValue(parent, requestReceivedTimestampKey, receivedTimestamp) +} + +// ReceivedTimestampFrom returns the value of the ReceivedTimestamp key from the specified context. 
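+// The second return value is false when no timestamp was stored via WithReceivedTimestamp.
+// Sketch of the intended round trip: an early filter records the arrival time,
+//
+//	ctx = request.WithReceivedTimestamp(req.Context(), time.Now())
+//
+// and the instrumentation in endpoints/metrics recovers it to measure end-to-end latency:
+//
+//	if t, ok := request.ReceivedTimestampFrom(ctx); ok {
+//		elapsed := time.Since(t)
+//		_ = elapsed
+//	}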
+func ReceivedTimestampFrom(ctx context.Context) (time.Time, bool) { + info, ok := ctx.Value(requestReceivedTimestampKey).(time.Time) + return info, ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 0890ae4ff..1f5dc28a9 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -77,7 +77,7 @@ var specialVerbsNoSubresources = sets.NewString("proxy") // this list allows the parser to distinguish between a namespace subresource, and a namespaced resource var namespaceSubresources = sets.NewString("status", "finalize") -// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/master/master_test.go, so we never drift +// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...) type RequestInfoFactory struct { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go b/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go new file mode 100644 index 000000000..c0dde6e06 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package warning + +import ( + restful "github.com/emicklei/go-restful" + + "k8s.io/apiserver/pkg/warning" +) + +// AddWarningsHandler returns a handler that adds the provided warnings to all requests, +// then delegates to the provided handler. +func AddWarningsHandler(handler restful.RouteFunction, warnings []string) restful.RouteFunction { + if len(warnings) == 0 { + return handler + } + + return func(req *restful.Request, res *restful.Response) { + ctx := req.Request.Context() + for _, msg := range warnings { + warning.AddWarning(ctx, "", msg) + } + handler(req, res) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index a407c3492..612be9845 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -61,6 +61,7 @@ const ( // owner: @ilackams // alpha: v1.7 + // beta: v1.16 // // Enables compression of REST responses (GET and LIST only) APIResponseCompression featuregate.Feature = "APIResponseCompression" @@ -85,6 +86,7 @@ const ( // owner: @caesarxuchao // alpha: v1.15 + // beta: v1.16 // // Allow apiservers to show a count of remaining items in the response // to a chunking list request. @@ -105,6 +107,12 @@ const ( // document. StorageVersionHash featuregate.Feature = "StorageVersionHash" + // owner: @caesarxuchao @roycaihw + // alpha: v1.20 + // + // Enable the storage version API. 
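+	// When enabled together with APIServerIdentity, the endpoint installer records a
+	// storageversion.ResourceInfo (group resource plus encoding version) for each
+	// installed resource so the encoding version can be reported through this API.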
+ StorageVersionAPI featuregate.Feature = "StorageVersionAPI" + // owner: @wojtek-t // alpha: v1.15 // beta: v1.16 @@ -122,6 +130,7 @@ const ( // owner: @wojtek-t // alpha: v1.16 + // beta: v1.20 // // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" @@ -129,6 +138,7 @@ const ( // owner: @shaloulcy, @wojtek-t // alpha: v1.18 // beta: v1.19 + // GA: v1.20 // // Allows label and field based indexes in apiserver watch cache to accelerate list operations. SelectorIndex featuregate.Feature = "SelectorIndex" @@ -138,6 +148,18 @@ const ( // // Allows sending warning headers in API responses. WarningHeaders featuregate.Feature = "WarningHeaders" + + // owner: @wojtek-t + // alpha: v1.20 + // + // Allows for updating watchcache resource version with progress notify events. + EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption" + + // owner: @roycaihw + // alpha: v1.20 + // + // Assigns each kube-apiserver an ID in a cluster. + APIServerIdentity featuregate.Feature = "APIServerIdentity" ) func init() { @@ -148,18 +170,21 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated}, - ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta}, - AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - APIListChunking: {Default: true, PreRelease: featuregate.Beta}, - DryRun: {Default: true, PreRelease: featuregate.GA}, - RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, - ServerSideApply: {Default: true, PreRelease: featuregate.Beta}, - StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha}, - RemoveSelfLink: {Default: false, PreRelease: featuregate.Alpha}, - SelectorIndex: {Default: true, PreRelease: featuregate.Beta}, - WarningHeaders: {Default: true, PreRelease: featuregate.Beta}, + StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated}, + ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta}, + AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, + APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, + APIListChunking: {Default: true, PreRelease: featuregate.Beta}, + DryRun: {Default: true, PreRelease: featuregate.GA}, + RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.Beta}, + StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, + StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, + RemoveSelfLink: {Default: true, PreRelease: featuregate.Beta}, + SelectorIndex: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WarningHeaders: {Default: true, PreRelease: featuregate.Beta}, + EfficientWatchResumption: {Default: false, PreRelease: featuregate.Alpha}, + APIServerIdentity: {Default: false, PreRelease: featuregate.Alpha}, } diff --git 
a/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS b/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS new file mode 100644 index 000000000..d812b5d3e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- deads2k +- derekwaynecarr +- vishh +reviewers: +- deads2k +- derekwaynecarr +- smarterclayton +- vishh +labels: +- sig/api-machinery diff --git a/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go b/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go new file mode 100644 index 000000000..15f8b7613 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go @@ -0,0 +1,88 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/tools/cache" +) + +// UsageStatsOptions is an options structs that describes how stats should be calculated +type UsageStatsOptions struct { + // Namespace where stats should be calculate + Namespace string + // Scopes that must match counted objects + Scopes []corev1.ResourceQuotaScope + // Resources are the set of resources to include in the measurement + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector +} + +// UsageStats is result of measuring observed resource use in the system +type UsageStats struct { + // Used maps resource to quantity used + Used corev1.ResourceList +} + +// Evaluator knows how to evaluate quota usage for a particular group resource +type Evaluator interface { + // Constraints ensures that each required resource is present on item + Constraints(required []corev1.ResourceName, item runtime.Object) error + // GroupResource returns the groupResource that this object knows how to evaluate + GroupResource() schema.GroupResource + // Handles determines if quota could be impacted by the specified attribute. + // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. + Handles(operation admission.Attributes) bool + // Matches returns true if the specified quota matches the input item + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) + // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. 
It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName + // Usage returns the resource usage for the specified object + Usage(item runtime.Object) (corev1.ResourceList, error) + // UsageStats calculates latest observed usage stats for all objects + UsageStats(options UsageStatsOptions) (UsageStats, error) +} + +// Configuration defines how the quota system is configured. +type Configuration interface { + // IgnoredResources are ignored by quota. + IgnoredResources() map[schema.GroupResource]struct{} + // Evaluators for quota evaluation. + Evaluators() []Evaluator +} + +// Registry maintains a list of evaluators +type Registry interface { + // Add to registry + Add(e Evaluator) + // Remove from registry + Remove(e Evaluator) + // Get by group resource + Get(gr schema.GroupResource) Evaluator + // List from registry + List() []Evaluator +} + +// ListerForResourceFunc knows how to get a lister for a specific resource +type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error) diff --git a/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go b/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go new file mode 100644 index 000000000..3c2927d73 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go @@ -0,0 +1,293 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Equals returns true if the two lists are equivalent +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// LessThanOrEqual returns true if a < b for each key in b +// If false, it returns the keys in a that exceeded b +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { + result := true + resourceNames := []corev1.ResourceName{} + for key, value := range b { + if other, found := a[key]; found { + if other.Cmp(value) > 0 { + result = false + resourceNames = append(resourceNames, key) + } + } + } + return result, resourceNames +} + +// Max returns the result of Max(a, b) for each named resource +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) <= 0 { + result[key] = other.DeepCopy() + continue + } + } + result[key] = value.DeepCopy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// Add returns the result of a + b for each named resource +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Add(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// SubtractWithNonNegativeResult - subtracts and returns result of a - b but +// makes sure we don't return negative values to prevent negative resource usage. 
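+// For example (a sketch with hypothetical quantities):
+//   a = {cpu: 1, memory: 1Gi}, b = {cpu: 2}  =>  {cpu: 0, memory: 1Gi}
+// so callers never observe a negative usage delta.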
+func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + zero := resource.MustParse("0") + + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Sub(other) + } + if quantity.Cmp(zero) > 0 { + result[key] = quantity + } else { + result[key] = zero + } + } + + for key := range b { + if _, found := result[key]; !found { + result[key] = zero + } + } + return result +} + +// Subtract returns the result of a - b for each named resource +func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Sub(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + quantity := value.DeepCopy() + quantity.Neg() + result[key] = quantity + } + } + return result +} + +// Mask returns a new resource list that only has the values with the specified names +func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { + nameSet := ToSet(names) + result := corev1.ResourceList{} + for key, value := range resources { + if nameSet.Has(string(key)) { + result[key] = value.DeepCopy() + } + } + return result +} + +// ResourceNames returns a list of all resource names in the ResourceList +func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { + result := []corev1.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +// Contains returns true if the specified item is in the list of items +func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { + for _, i := range items { + if i == item { + return true + } + } + return false +} + +// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set +func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { + for _, prefix := range prefixSet { + if strings.HasPrefix(string(item), prefix) { + return true + } + } + return false +} + +// Intersection returns the intersection of both list of resources, deduped and sorted +func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { + result := make([]corev1.ResourceName, 0, len(a)) + for _, item := range a { + if Contains(result, item) { + continue + } + if !Contains(b, item) { + continue + } + result = append(result, item) + } + sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) + return result +} + +// Difference returns the list of resources resulting from a-b, deduped and sorted +func Difference(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { + result := make([]corev1.ResourceName, 0, len(a)) + for _, item := range a { + if Contains(b, item) || Contains(result, item) { + continue + } + result = append(result, item) + } + sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) + return result +} + +// IsZero returns true if each key maps to the quantity value 0 +func IsZero(a corev1.ResourceList) bool { + zero := resource.MustParse("0") + for _, v := range a { + if v.Cmp(zero) != 0 { + return false + } + } + return true +} + +// IsNegative returns the set of resource names that have a negative value. 
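+// For example (hypothetical quantities), {cpu: -100m, memory: 1Gi} yields [cpu].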
+func IsNegative(a corev1.ResourceList) []corev1.ResourceName { + results := []corev1.ResourceName{} + zero := resource.MustParse("0") + for k, v := range a { + if v.Cmp(zero) < 0 { + results = append(results, k) + } + } + return results +} + +// ToSet takes a list of resource names and converts to a string set +func ToSet(resourceNames []corev1.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} + +// CalculateUsage calculates and returns the requested ResourceList usage. +// If an error is returned, usage only contains the resources which encountered no calculation errors. +func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { + // find the intersection between the hard resources on the quota + // and the resources this controller can track to know what we can + // look to measure updated usage stats for + hardResources := ResourceNames(hardLimits) + potentialResources := []corev1.ResourceName{} + evaluators := registry.List() + for _, evaluator := range evaluators { + potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) + } + // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard + matchedResources := Intersection(hardResources, potentialResources) + + errors := []error{} + + // sum the observed usage from each evaluator + newUsage := corev1.ResourceList{} + for _, evaluator := range evaluators { + // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything + intersection := evaluator.MatchingResources(matchedResources) + if len(intersection) == 0 { + continue + } + + usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector} + stats, err := evaluator.UsageStats(usageStatsOptions) + if err != nil { + // remember the error + errors = append(errors, err) + // exclude resources which encountered calculation errors + matchedResources = Difference(matchedResources, intersection) + continue + } + newUsage = Add(newUsage, stats.Used) + } + + // mask the observed usage to only the set of resources tracked by this quota + // merge our observed usage with the quota usage status + // if the new usage is different than the last usage, we will need to do an update + newUsage = Mask(newUsage, matchedResources) + return newUsage, utilerrors.NewAggregate(errors) +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS new file mode 100644 index 000000000..3ac0d161d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- davidopp +- saad-ali +- janetkuo +- pwittrock +- ncdc +- piosz +- dims +- hongchaodeng +- krousey +- xiang90 +- resouer +- sdminonne +- enj diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go b/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go new file mode 100644 index 000000000..ea79d130a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 
The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generic provides a generic object store interface and a +// generic label/field matching type. +package generic // import "k8s.io/apiserver/pkg/registry/generic" diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go new file mode 100644 index 000000000..4364374ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" +) + +// ObjectMetaFieldsSet returns a fields that represent the ObjectMeta. +func ObjectMetaFieldsSet(objectMeta *metav1.ObjectMeta, hasNamespaceField bool) fields.Set { + if !hasNamespaceField { + return fields.Set{ + "metadata.name": objectMeta.Name, + } + } + return fields.Set{ + "metadata.name": objectMeta.Name, + "metadata.namespace": objectMeta.Namespace, + } +} + +// AdObjectMetaField add fields that represent the ObjectMeta to source. +func AddObjectMetaFieldsSet(source fields.Set, objectMeta *metav1.ObjectMeta, hasNamespaceField bool) fields.Set { + source["metadata.name"] = objectMeta.Name + if hasNamespaceField { + source["metadata.namespace"] = objectMeta.Namespace + } + return source +} + +// MergeFieldsSets merges a fields'set from fragment into the source. +func MergeFieldsSets(source fields.Set, fragment fields.Set) fields.Set { + for k, value := range fragment { + source[k] = value + } + return source +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go new file mode 100644 index 000000000..577192b62 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/client-go/tools/cache" +) + +// RESTOptions is set of configuration options to generic registries. +type RESTOptions struct { + StorageConfig *storagebackend.Config + Decorator StorageDecorator + + EnableGarbageCollection bool + DeleteCollectionWorkers int + ResourcePrefix string + CountMetricPollPeriod time.Duration +} + +// Implement RESTOptionsGetter so that RESTOptions can directly be used when available (i.e. tests) +func (opts RESTOptions) GetRESTOptions(schema.GroupResource) (RESTOptions, error) { + return opts, nil +} + +type RESTOptionsGetter interface { + GetRESTOptions(resource schema.GroupResource) (RESTOptions, error) +} + +// StoreOptions is set of configuration options used to complete generic registries. +type StoreOptions struct { + RESTOptions RESTOptionsGetter + TriggerFunc storage.IndexerFuncs + AttrFunc storage.AttrFunc + Indexers *cache.Indexers +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go new file mode 100644 index 000000000..005a376d4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +type decoratedWatcher struct { + w watch.Interface + decorator ObjectFunc + cancel context.CancelFunc + resultCh chan watch.Event +} + +func newDecoratedWatcher(w watch.Interface, decorator ObjectFunc) *decoratedWatcher { + ctx, cancel := context.WithCancel(context.Background()) + d := &decoratedWatcher{ + w: w, + decorator: decorator, + cancel: cancel, + resultCh: make(chan watch.Event), + } + go d.run(ctx) + return d +} + +func (d *decoratedWatcher) run(ctx context.Context) { + var recv, send watch.Event + var ok bool + for { + select { + case recv, ok = <-d.w.ResultChan(): + // The underlying channel may be closed after timeout. 
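+			// In that case, cancel the derived context and stop forwarding;
+			// the decorated watcher is done.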
+ if !ok { + d.cancel() + return + } + switch recv.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + err := d.decorator(recv.Object) + if err != nil { + send = makeStatusErrorEvent(err) + break + } + send = recv + case watch.Error: + send = recv + } + select { + case d.resultCh <- send: + if send.Type == watch.Error { + d.cancel() + } + case <-ctx.Done(): + } + case <-ctx.Done(): + d.w.Stop() + close(d.resultCh) + return + } + } +} + +func (d *decoratedWatcher) Stop() { + d.cancel() +} + +func (d *decoratedWatcher) ResultChan() <-chan watch.Event { + return d.resultCh +} + +func makeStatusErrorEvent(err error) watch.Event { + status := &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + } + return watch.Event{ + Type: watch.Error, + Object: status, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go new file mode 100644 index 000000000..bd315ae47 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package etcd has a generic implementation of a registry that +// stores things in etcd. +package registry // import "k8s.io/apiserver/pkg/registry/generic/registry" diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go new file mode 100644 index 000000000..2f184c50e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" +) + +type DryRunnableStorage struct { + Storage storage.Interface + Codec runtime.Codec +} + +func (s *DryRunnableStorage) Versioner() storage.Versioner { + return s.Storage.Versioner() +} + +func (s *DryRunnableStorage) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64, dryRun bool) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err == nil { + return storage.NewKeyExistsError(key, 0) + } + return s.copyInto(obj, out) + } + return s.Storage.Create(ctx, key, obj, out, ttl) +} + +func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err != nil { + return err + } + if err := preconditions.Check(key, out); err != nil { + return err + } + return deleteValidation(ctx, out) + } + return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation) +} + +func (s *DryRunnableStorage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.Watch(ctx, key, opts) +} + +func (s *DryRunnableStorage) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.WatchList(ctx, key, opts) +} + +func (s *DryRunnableStorage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + return s.Storage.Get(ctx, key, opts, objPtr) +} + +func (s *DryRunnableStorage) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.GetToList(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.List(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error { + if dryRun { + err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType) + if err != nil { + return err + } + err = preconditions.Check(key, ptrToType) + if err != nil { + return err + } + rev, err := s.Versioner().ObjectResourceVersion(ptrToType) + if err != nil { + return err + } + out, _, err := tryUpdate(ptrToType, storage.ResponseMeta{ResourceVersion: rev}) + if err != nil { + return err + } + return s.copyInto(out, ptrToType) + } + return s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion) +} + +func (s *DryRunnableStorage) Count(key string) (int64, error) { + return s.Storage.Count(key) +} + +func (s *DryRunnableStorage) copyInto(in, out runtime.Object) error { + var data []byte + + data, err := runtime.Encode(s.Codec, in) + if err != nil { + return err + } + _, _, err = s.Codec.Decode(data, nil, out) + if err != nil { + return err + } + return nil + +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go new file mode 100644 index 000000000..8c31bf819 --- /dev/null +++ 
b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "fmt" + "sync" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/storage" + cacherstorage "k8s.io/apiserver/pkg/storage/cacher" + "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/tools/cache" +) + +// Creates a cacher based given storageConfig. +func StorageWithCacher() generic.StorageDecorator { + return func( + storageConfig *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + triggerFuncs storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { + + s, d, err := generic.NewRawStorage(storageConfig, newFunc) + if err != nil { + return s, d, err + } + if klog.V(5).Enabled() { + klog.Infof("Storage caching is enabled for %s", objectTypeToString(newFunc())) + } + + cacherConfig := cacherstorage.Config{ + Storage: s, + Versioner: etcd3.APIObjectVersioner{}, + ResourcePrefix: resourcePrefix, + KeyFunc: keyFunc, + NewFunc: newFunc, + NewListFunc: newListFunc, + GetAttrsFunc: getAttrsFunc, + IndexerFuncs: triggerFuncs, + Indexers: indexers, + Codec: storageConfig.Codec, + } + cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig) + if err != nil { + return nil, func() {}, err + } + destroyFunc := func() { + cacher.Stop() + d() + } + + // TODO : Remove RegisterStorageCleanup below when PR + // https://github.com/kubernetes/kubernetes/pull/50690 + // merges as that shuts down storage properly + RegisterStorageCleanup(destroyFunc) + + return cacher, destroyFunc, nil + } +} + +func objectTypeToString(obj runtime.Object) string { + // special-case unstructured objects that tell us their apiVersion/kind + if u, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured { + if apiVersion, kind := u.GetAPIVersion(), u.GetKind(); len(apiVersion) > 0 && len(kind) > 0 { + return fmt.Sprintf("apiVersion=%s, kind=%s", apiVersion, kind) + } + } + // otherwise just return the type + return fmt.Sprintf("%T", obj) +} + +// TODO : Remove all the code below when PR +// https://github.com/kubernetes/kubernetes/pull/50690 +// merges as that shuts down storage properly +// HACK ALERT : Track the destroy methods to call them +// from the test harness. TrackStorageCleanup will be called +// only from the test harness, so Register/Cleanup will be +// no-op at runtime. 
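+// A typical test-harness sequence is roughly (a sketch, hypothetical test code):
+//   TrackStorageCleanup()
+//   defer CleanupStorage()
+//   ... build stores via StorageWithCacher() so their destroy funcs get registered ...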
+ +var cleanupLock sync.Mutex +var cleanup []func() = nil + +func TrackStorageCleanup() { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup != nil { + panic("Conflicting storage tracking") + } + cleanup = make([]func(), 0) +} + +func RegisterStorageCleanup(fn func()) { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup == nil { + return + } + cleanup = append(cleanup, fn) +} + +func CleanupStorage() { + cleanupLock.Lock() + old := cleanup + cleanup = nil + cleanupLock.Unlock() + + for _, d := range old { + d() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go new file mode 100644 index 000000000..7c2f4c390 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -0,0 +1,1412 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "fmt" + "reflect" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storage" + storeerr "k8s.io/apiserver/pkg/storage/errors" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/util/dryrun" + "k8s.io/client-go/tools/cache" + + "k8s.io/klog/v2" +) + +// ObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may transform the provided +// object. +type ObjectFunc func(obj runtime.Object) error + +// GenericStore interface can be used for type assertions when we need to access the underlying strategies. +type GenericStore interface { + GetCreateStrategy() rest.RESTCreateStrategy + GetUpdateStrategy() rest.RESTUpdateStrategy + GetDeleteStrategy() rest.RESTDeleteStrategy + GetExportStrategy() rest.RESTExportStrategy +} + +// Store implements pkg/api/rest.StandardStorage. It's intended to be +// embeddable and allows the consumer to implement any non-generic functions +// that are required. This object is intended to be copyable so that it can be +// used in different ways but share the same underlying behavior. +// +// All fields are required unless specified. +// +// The intended use of this type is embedding within a Kind specific +// RESTStorage implementation. 
This type provides CRUD semantics on a Kubelike +// resource, handling details like conflict detection with ResourceVersion and +// semantics. The RESTCreateStrategy, RESTUpdateStrategy, and +// RESTDeleteStrategy are generic across all backends, and encapsulate logic +// specific to the API. +// +// TODO: make the default exposed methods exactly match a generic RESTStorage +type Store struct { + // NewFunc returns a new instance of the type this registry returns for a + // GET of a single object, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object + NewFunc func() runtime.Object + + // NewListFunc returns a new list of the type this registry; it is the + // type returned when the resource is listed, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource + NewListFunc func() runtime.Object + + // DefaultQualifiedResource is the pluralized name of the resource. + // This field is used if there is no request info present in the context. + // See qualifiedResourceFromContext for details. + DefaultQualifiedResource schema.GroupResource + + // KeyRootFunc returns the root etcd key for this resource; should not + // include trailing "/". This is used for operations that work on the + // entire collection (listing and watching). + // + // KeyRootFunc and KeyFunc must be supplied together or not at all. + KeyRootFunc func(ctx context.Context) string + + // KeyFunc returns the key for a specific object in the collection. + // KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace' + // can be gotten from ctx. + // + // KeyFunc and KeyRootFunc must be supplied together or not at all. + KeyFunc func(ctx context.Context, name string) (string, error) + + // ObjectNameFunc returns the name of an object or an error. + ObjectNameFunc func(obj runtime.Object) (string, error) + + // TTLFunc returns the TTL (time to live) that objects should be persisted + // with. The existing parameter is the current TTL or the default for this + // operation. The update parameter indicates whether this is an operation + // against an existing object. + // + // Objects that are persisted with a TTL are evicted once the TTL expires. + TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error) + + // PredicateFunc returns a matcher corresponding to the provided labels + // and fields. The SelectionPredicate returned should return true if the + // object matches the given field and label selectors. + PredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate + + // EnableGarbageCollection affects the handling of Update and Delete + // requests. Enabling garbage collection allows finalizers to do work to + // finalize this object before the store deletes it. + // + // If any store has garbage collection enabled, it must also be enabled in + // the kube-controller-manager. + EnableGarbageCollection bool + + // DeleteCollectionWorkers is the maximum number of workers in a single + // DeleteCollection call. Delete requests for the items in a collection + // are issued in parallel. + DeleteCollectionWorkers int + + // Decorator is an optional exit hook on an object returned from the + // underlying storage. The returned object could be an individual object + // (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for + // integrations that are above storage and should only be used for + // specific cases where storage of the value is not appropriate, since + // they cannot be watched. 
+ Decorator ObjectFunc + // CreateStrategy implements resource-specific behavior during creation. + CreateStrategy rest.RESTCreateStrategy + // AfterCreate implements a further operation to run after a resource is + // created and before it is decorated, optional. + AfterCreate ObjectFunc + + // UpdateStrategy implements resource-specific behavior during updates. + UpdateStrategy rest.RESTUpdateStrategy + // AfterUpdate implements a further operation to run after a resource is + // updated and before it is decorated, optional. + AfterUpdate ObjectFunc + + // DeleteStrategy implements resource-specific behavior during deletion. + DeleteStrategy rest.RESTDeleteStrategy + // AfterDelete implements a further operation to run after a resource is + // deleted and before it is decorated, optional. + AfterDelete ObjectFunc + // ReturnDeletedObject determines whether the Store returns the object + // that was deleted. Otherwise, return a generic success status response. + ReturnDeletedObject bool + // ShouldDeleteDuringUpdate is an optional function to determine whether + // an update from existing to obj should result in a delete. + // If specified, this is checked in addition to standard finalizer, + // deletionTimestamp, and deletionGracePeriodSeconds checks. + ShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool + // ExportStrategy implements resource-specific behavior during export, + // optional. Exported objects are not decorated. + ExportStrategy rest.RESTExportStrategy + // TableConvertor is an optional interface for transforming items or lists + // of items into tabular output. If unset, the default will be used. + TableConvertor rest.TableConvertor + + // Storage is the interface for the underlying storage for the + // resource. It is wrapped into a "DryRunnableStorage" that will + // either pass-through or simply dry-run. + Storage DryRunnableStorage + // StorageVersioner outputs the an object will be + // converted to before persisted in etcd, given a list of possible + // kinds of the object. + // If the StorageVersioner is nil, apiserver will leave the + // storageVersionHash as empty in the discovery document. + StorageVersioner runtime.GroupVersioner + // Called to cleanup clients used by the underlying Storage; optional. + DestroyFunc func() +} + +// Note: the rest.StandardStorage interface aggregates the common REST verbs +var _ rest.StandardStorage = &Store{} +var _ rest.Exporter = &Store{} +var _ rest.TableConvertor = &Store{} +var _ GenericStore = &Store{} + +const ( + OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + resourceCountPollPeriodJitter = 1.2 +) + +// NamespaceKeyRootFunc is the default function for constructing storage paths +// to resource directories enforcing namespace rules. +func NamespaceKeyRootFunc(ctx context.Context, prefix string) string { + key := prefix + ns, ok := genericapirequest.NamespaceFrom(ctx) + if ok && len(ns) > 0 { + key = key + "/" + ns + } + return key +} + +// NamespaceKeyFunc is the default function for constructing storage paths to +// a resource relative to the given prefix enforcing namespace rules. If the +// context does not contain a namespace, it errors. 
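+// For example (hypothetical values), a prefix of "/pods", a request namespace
+// of "ns1" and a name of "web-0" yield the key "/pods/ns1/web-0".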
+func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + key := NamespaceKeyRootFunc(ctx, prefix) + ns, ok := genericapirequest.NamespaceFrom(ctx) + if !ok || len(ns) == 0 { + return "", apierrors.NewBadRequest("Namespace parameter required.") + } + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key = key + "/" + name + return key, nil +} + +// NoNamespaceKeyFunc is the default function for constructing storage paths +// to a resource relative to the given prefix without a namespace. +func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key := prefix + "/" + name + return key, nil +} + +// New implements RESTStorage.New. +func (e *Store) New() runtime.Object { + return e.NewFunc() +} + +// NewList implements rest.Lister. +func (e *Store) NewList() runtime.Object { + return e.NewListFunc() +} + +// NamespaceScoped indicates whether the resource is namespaced +func (e *Store) NamespaceScoped() bool { + if e.CreateStrategy != nil { + return e.CreateStrategy.NamespaceScoped() + } + if e.UpdateStrategy != nil { + return e.UpdateStrategy.NamespaceScoped() + } + + panic("programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too") +} + +// GetCreateStrategy implements GenericStore. +func (e *Store) GetCreateStrategy() rest.RESTCreateStrategy { + return e.CreateStrategy +} + +// GetUpdateStrategy implements GenericStore. +func (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy { + return e.UpdateStrategy +} + +// GetDeleteStrategy implements GenericStore. +func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy { + return e.DeleteStrategy +} + +// GetExportStrategy implements GenericStore. +func (e *Store) GetExportStrategy() rest.RESTExportStrategy { + return e.ExportStrategy +} + +// List returns a list of items matching labels and field according to the +// store's PredicateFunc. +func (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + out, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options) + if err != nil { + return nil, err + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, err + } + } + return out, nil +} + +// ListPredicate returns a list of all the items matching the given +// SelectionPredicate. +func (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) { + if options == nil { + // By default we should serve the request from etcd. 
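+		// An empty ResourceVersion means the list is served with an up-to-date
+		// (quorum) read from etcd rather than from the watch cache.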
+ options = &metainternalversion.ListOptions{ResourceVersion: ""} + } + p.Limit = options.Limit + p.Continue = options.Continue + list := e.NewListFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p} + if name, ok := p.MatchesSingle(); ok { + if key, err := e.KeyFunc(ctx, name); err == nil { + err := e.Storage.GetToList(ctx, key, storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) + } + // if we cannot extract a key based on the current context, the optimization is skipped + } + + err := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) +} + +// Create inserts a new item according to the unique key from the object. +func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(ctx, obj.DeepCopyObject()); err != nil { + return nil, err + } + } + + name, err := e.ObjectNameFunc(obj) + if err != nil { + return nil, err + } + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + qualifiedResource := e.qualifiedResourceFromContext(ctx) + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, err + } + out := e.NewFunc() + if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil { + err = storeerr.InterpretCreateError(err, qualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj) + if !apierrors.IsAlreadyExists(err) { + return nil, err + } + if errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil { + return nil, err + } + accessor, errGetAcc := meta.Accessor(out) + if errGetAcc != nil { + return nil, err + } + if accessor.GetDeletionTimestamp() != nil { + msg := &err.(*apierrors.StatusError).ErrStatus.Message + *msg = fmt.Sprintf("object is being deleted: %s", *msg) + } + return nil, err + } + if e.AfterCreate != nil { + if err := e.AfterCreate(out); err != nil { + return nil, err + } + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, err + } + } + return out, nil +} + +// ShouldDeleteDuringUpdate is the default function for +// checking if an object should be deleted during an update. 
+// It checks if the new object has no finalizers, +// the existing object's deletionTimestamp is set, and +// the existing object's deletionGracePeriodSeconds is 0 or nil +func ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool { + newMeta, err := meta.Accessor(obj) + if err != nil { + utilruntime.HandleError(err) + return false + } + oldMeta, err := meta.Accessor(existing) + if err != nil { + utilruntime.HandleError(err) + return false + } + if len(newMeta.GetFinalizers()) > 0 { + // don't delete with finalizers remaining in the new object + return false + } + if oldMeta.GetDeletionTimestamp() == nil { + // don't delete if the existing object hasn't had a delete request made + return false + } + // delete if the existing object has no grace period or a grace period of 0 + return oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0 +} + +// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list. +// Used for objects that are either been finalized or have never initialized. +func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, dryRun bool) (runtime.Object, bool, error) { + out := e.NewFunc() + klog.V(6).Infof("going to delete %s from registry, triggered by update", name) + // Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb. + if err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryRun); err != nil { + // Deletion is racy, i.e., there could be multiple update + // requests to remove all finalizers from the object, so we + // ignore the NotFound error. + if storage.IsNotFound(err) { + _, err := e.finalizeDelete(ctx, obj, true) + // clients are expecting an updated object if a PUT succeeded, + // but finalizeDelete returns a metav1.Status, so return + // the object in the request instead. + return obj, false, err + } + return nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name) + } + _, err := e.finalizeDelete(ctx, out, true) + // clients are expecting an updated object if a PUT succeeded, but + // finalizeDelete returns a metav1.Status, so return the object in + // the request instead. + return obj, false, err +} + +// Update performs an atomic update and set of the object. Returns the result of the update +// or an error. If the registry allows create-on-update, the create flow will be executed. +// A bool is returned along with the object and any errors, to indicate object creation. 
+func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + + var ( + creatingObj runtime.Object + creating = false + ) + + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storagePreconditions := &storage.Preconditions{} + if preconditions := objInfo.Preconditions(); preconditions != nil { + storagePreconditions.UID = preconditions.UID + storagePreconditions.ResourceVersion = preconditions.ResourceVersion + } + + out := e.NewFunc() + // deleteObj is only used in case a deletion is carried out + var deleteObj runtime.Object + err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + existingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing) + if err != nil { + return nil, nil, err + } + if existingResourceVersion == 0 { + if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { + return nil, nil, apierrors.NewNotFound(qualifiedResource, name) + } + } + + // Given the existing object, get the new object + obj, err := objInfo.UpdatedObject(ctx, existing) + if err != nil { + return nil, nil, err + } + + // If AllowUnconditionalUpdate() is true and the object specified by + // the user does not have a resource version, then we populate it with + // the latest version. Else, we check that the version specified by + // the user matches the version of latest storage object. + newResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj) + if err != nil { + return nil, nil, err + } + doUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() + + if existingResourceVersion == 0 { + creating = true + creatingObj = obj + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(ctx, obj.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, nil, err + } + + return obj, &ttl, nil + } + + creating = false + creatingObj = nil + if doUnconditionalUpdate { + // Update the object's resource version to match the latest + // storage object's resource version. + err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion) + if err != nil { + return nil, nil, err + } + } else { + // Check if the object's resource version matches the latest + // resource version. + if newResourceVersion == 0 { + // TODO: The Invalid error should have a field for Resource. + // After that field is added, we should fill the Resource and + // leave the Kind field empty. See the discussion in #18526. 
+ qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} + fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newResourceVersion, "must be specified for an update")} + return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) + } + if newResourceVersion != existingResourceVersion { + return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) + } + } + if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if updateValidation != nil { + if err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + // Check the default delete-during-update conditions, and store-specific conditions if provided + if ShouldDeleteDuringUpdate(ctx, key, obj, existing) && + (e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) { + deleteObj = obj + return nil, nil, errEmptiedFinalizers + } + ttl, err := e.calculateTTL(obj, res.TTL, true) + if err != nil { + return nil, nil, err + } + if int64(ttl) != res.TTL { + return obj, &ttl, nil + } + return obj, nil, nil + }, dryrun.IsDryRun(options.DryRun), nil) + + if err != nil { + // delete the object + if err == errEmptiedFinalizers { + return e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, dryrun.IsDryRun(options.DryRun)) + } + if creating { + err = storeerr.InterpretCreateError(err, qualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj) + } else { + err = storeerr.InterpretUpdateError(err, qualifiedResource, name) + } + return nil, false, err + } + + if creating { + if e.AfterCreate != nil { + if err := e.AfterCreate(out); err != nil { + return nil, false, err + } + } + } else { + if e.AfterUpdate != nil { + if err := e.AfterUpdate(out); err != nil { + return nil, false, err + } + } + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, false, err + } + } + return out, creating, nil +} + +// Get retrieves the item from storage. +func (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + obj := e.NewFunc() + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + if err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil { + return nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name) + } + if e.Decorator != nil { + if err := e.Decorator(obj); err != nil { + return nil, err + } + } + return obj, nil +} + +// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info. +// If the context has no request info, DefaultQualifiedResource is used. 
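+// For example (hypothetical request), a GET of /apis/apps/v1/namespaces/ns1/deployments
+// resolves to schema.GroupResource{Group: "apps", Resource: "deployments"}.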
+func (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource { + if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { + return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource} + } + // some implementations access storage directly and thus the context has no RequestInfo + return e.DefaultQualifiedResource +} + +var ( + errAlreadyDeleting = fmt.Errorf("abort delete") + errDeleteNow = fmt.Errorf("delete now") + errEmptiedFinalizers = fmt.Errorf("emptied finalizers") +) + +// shouldOrphanDependents returns true if the finalizer for orphaning should be set +// updated for FinalizerOrphanDependents. In the order of highest to lowest +// priority, there are three factors affect whether to add/remove the +// FinalizerOrphanDependents: options, existing finalizers of the object, +// and e.DeleteStrategy.DefaultGarbageCollectionPolicy. +func shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy) + var defaultGCPolicy rest.GarbageCollectionPolicy + if ok { + defaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx) + } + + if defaultGCPolicy == rest.Unsupported { + // return false to indicate that we should NOT orphan + return false + } + + // An explicit policy was set at deletion time, that overrides everything + if options != nil && options.OrphanDependents != nil { + return *options.OrphanDependents + } + if options != nil && options.PropagationPolicy != nil { + switch *options.PropagationPolicy { + case metav1.DeletePropagationOrphan: + return true + case metav1.DeletePropagationBackground, metav1.DeletePropagationForeground: + return false + } + } + + // If a finalizer is set in the object, it overrides the default + // validation should make sure the two cases won't be true at the same time. + finalizers := accessor.GetFinalizers() + for _, f := range finalizers { + switch f { + case metav1.FinalizerOrphanDependents: + return true + case metav1.FinalizerDeleteDependents: + return false + } + } + + // Get default orphan policy from this REST object type if it exists + return defaultGCPolicy == rest.OrphanDependents +} + +// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set +// updated for FinalizerDeleteDependents. In the order of highest to lowest +// priority, there are three factors affect whether to add/remove the +// FinalizerDeleteDependents: options, existing finalizers of the object, and +// e.DeleteStrategy.DefaultGarbageCollectionPolicy. 
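+// For example, DeleteOptions with PropagationPolicy=Foreground returns true
+// (the foreground finalizer should be set) unless the delete strategy reports
+// rest.Unsupported.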
+func shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported { + // return false to indicate that we should NOT delete in foreground + return false + } + + // If an explicit policy was set at deletion time, that overrides both + if options != nil && options.OrphanDependents != nil { + return false + } + if options != nil && options.PropagationPolicy != nil { + switch *options.PropagationPolicy { + case metav1.DeletePropagationForeground: + return true + case metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan: + return false + } + } + + // If a finalizer is set in the object, it overrides the default + // validation has made sure the two cases won't be true at the same time. + finalizers := accessor.GetFinalizers() + for _, f := range finalizers { + switch f { + case metav1.FinalizerDeleteDependents: + return true + case metav1.FinalizerOrphanDependents: + return false + } + } + + return false +} + +// deletionFinalizersForGarbageCollection analyzes the object and delete options +// to determine whether the object is in need of finalization by the garbage +// collector. If so, returns the set of deletion finalizers to apply and a bool +// indicating whether the finalizer list has changed and is in need of updating. +// +// The finalizers returned are intended to be handled by the garbage collector. +// If garbage collection is disabled for the store, this function returns false +// to ensure finalizers aren't set which will never be cleared. +func deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) { + if !e.EnableGarbageCollection { + return false, []string{} + } + shouldOrphan := shouldOrphanDependents(ctx, e, accessor, options) + shouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options) + newFinalizers := []string{} + + // first remove both finalizers, add them back if needed. + for _, f := range accessor.GetFinalizers() { + if f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents { + continue + } + newFinalizers = append(newFinalizers, f) + } + + if shouldOrphan { + newFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents) + } + if shouldDeleteDependentInForeground { + newFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents) + } + + oldFinalizerSet := sets.NewString(accessor.GetFinalizers()...) + newFinalizersSet := sets.NewString(newFinalizers...) + if oldFinalizerSet.Equal(newFinalizersSet) { + return false, accessor.GetFinalizers() + } + return true, newFinalizers +} + +// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the +// DeletionTimestamp to "now" if there is no existing deletionTimestamp or if the existing +// deletionTimestamp is further in future. Finalizers are watching for such updates and will +// finalize the object if their IDs are present in the object's Finalizers list. +func markAsDeleting(obj runtime.Object, now time.Time) (err error) { + objectMeta, kerr := meta.Accessor(obj) + if kerr != nil { + return kerr + } + // This handles Generation bump for resources that don't support graceful + // deletion. 
For resources that support graceful deletion is handle in + // pkg/api/rest/delete.go + if objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 { + objectMeta.SetGeneration(objectMeta.GetGeneration() + 1) + } + existingDeletionTimestamp := objectMeta.GetDeletionTimestamp() + if existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) { + metaNow := metav1.NewTime(now) + objectMeta.SetDeletionTimestamp(&metaNow) + } + var zero int64 = 0 + objectMeta.SetDeletionGracePeriodSeconds(&zero) + return nil +} + +// updateForGracefulDeletionAndFinalizers updates the given object for +// graceful deletion and finalization by setting the deletion timestamp and +// grace period seconds (graceful deletion) and updating the list of +// finalizers (finalization); it returns: +// +// 1. an error +// 2. a boolean indicating that the object was not found, but it should be +// ignored +// 3. a boolean indicating that the object's grace period is exhausted and it +// should be deleted immediately +// 4. a new output object with the state that was updated +// 5. a copy of the last existing state of the object +func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) { + lastGraceful := int64(0) + var pendingFinalizers bool + out = e.NewFunc() + err = e.Storage.GuaranteedUpdate( + ctx, + key, + out, + false, /* ignoreNotFound */ + &preconditions, + storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) { + if err := deleteValidation(ctx, existing); err != nil { + return nil, err + } + graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options) + if err != nil { + return nil, err + } + if pendingGraceful { + return nil, errAlreadyDeleting + } + + // Add/remove the orphan finalizer as the options dictates. + // Note that this occurs after checking pendingGraceufl, so + // finalizers cannot be updated via DeleteOptions if deletion has + // started. + existingAccessor, err := meta.Accessor(existing) + if err != nil { + return nil, err + } + needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options) + if needsUpdate { + existingAccessor.SetFinalizers(newFinalizers) + } + + pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0 + if !graceful { + // set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion + if pendingFinalizers { + klog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name) + err = markAsDeleting(existing, time.Now()) + if err != nil { + return nil, err + } + return existing, nil + } + return nil, errDeleteNow + } + lastGraceful = *options.GracePeriodSeconds + lastExisting = existing + return existing, nil + }), + dryrun.IsDryRun(options.DryRun), + nil, + ) + switch err { + case nil: + // If there are pending finalizers, we never delete the object immediately. + if pendingFinalizers { + return nil, false, false, out, lastExisting + } + if lastGraceful > 0 { + return nil, false, false, out, lastExisting + } + // If we are here, the registry supports grace period mechanism and + // we are intentionally delete gracelessly. In this case, we may + // enter a race with other k8s components. 
If other component wins + // the race, the object will not be found, and we should tolerate + // the NotFound error. See + // https://github.com/kubernetes/kubernetes/issues/19403 for + // details. + return nil, true, true, out, lastExisting + case errDeleteNow: + // we've updated the object to have a zero grace period, or it's already at 0, so + // we should fall through and truly delete the object. + return nil, false, true, out, lastExisting + case errAlreadyDeleting: + out, err = e.finalizeDelete(ctx, in, true) + return err, false, false, out, lastExisting + default: + return storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting + } +} + +// Delete removes the item from storage. +func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + obj := e.NewFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + if err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil { + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + + // support older consumers of delete by treating "nil" as delete immediately + if options == nil { + options = metav1.NewDeleteOptions(0) + } + var preconditions storage.Preconditions + if options.Preconditions != nil { + preconditions.UID = options.Preconditions.UID + preconditions.ResourceVersion = options.Preconditions.ResourceVersion + } + graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options) + if err != nil { + return nil, false, err + } + // this means finalizers cannot be updated via DeleteOptions if a deletion is already pending + if pendingGraceful { + out, err := e.finalizeDelete(ctx, obj, false) + return out, false, err + } + // check if obj has pending finalizers + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, false, apierrors.NewInternalError(err) + } + pendingFinalizers := len(accessor.GetFinalizers()) != 0 + var ignoreNotFound bool + var deleteImmediately bool = true + var lastExisting, out runtime.Object + + // Handle combinations of graceful deletion and finalization by issuing + // the correct updates. + shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options) + // TODO: remove the check, because we support no-op updates now. + if graceful || pendingFinalizers || shouldUpdateFinalizers { + err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj) + // Update the preconditions.ResourceVersion if set since we updated the object. + if err == nil && deleteImmediately && preconditions.ResourceVersion != nil { + accessor, err = meta.Accessor(out) + if err != nil { + return out, false, apierrors.NewInternalError(err) + } + resourceVersion := accessor.GetResourceVersion() + preconditions.ResourceVersion = &resourceVersion + } + } + + // !deleteImmediately covers all cases where err != nil. We keep both to be future-proof. + if !deleteImmediately || err != nil { + return out, false, err + } + + // Going further in this function is not useful when we are + // performing a dry-run request. 
Worse, it will actually + // override "out" with the version of the object in database + // that doesn't have the finalizer and deletiontimestamp set + // (because the update above was dry-run too). If we already + // have that version available, let's just return it now, + // otherwise, we can call dry-run delete that will get us the + // latest version of the object. + if dryrun.IsDryRun(options.DryRun) && out != nil { + return out, true, nil + } + + // delete immediately, or no graceful deletion supported + klog.V(6).Infof("going to delete %s from registry: ", name) + out = e.NewFunc() + if err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun)); err != nil { + // Please refer to the place where we set ignoreNotFound for the reason + // why we ignore the NotFound error . + if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil { + // The lastExisting object may not be the last state of the object + // before its deletion, but it's the best approximation. + out, err := e.finalizeDelete(ctx, lastExisting, true) + return out, true, err + } + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + out, err = e.finalizeDelete(ctx, out, true) + return out, true, err +} + +// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface +func (e *Store) DeleteReturnsDeletedObject() bool { + return e.ReturnDeletedObject +} + +// DeleteCollection removes all items returned by List with a given ListOptions from storage. +// +// DeleteCollection is currently NOT atomic. It can happen that only subset of objects +// will be deleted from storage, and then an error will be returned. +// In case of success, the list of deleted objects will be returned. +// +// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we +// are removing all objects of a given type) with the current API (it's technically +// possibly with storage API, but watch is not delivered correctly then). +// It will be possible to fix it with v3 etcd API. +func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { + if listOptions == nil { + listOptions = &metainternalversion.ListOptions{} + } else { + listOptions = listOptions.DeepCopy() + } + + listObj, err := e.List(ctx, listOptions) + if err != nil { + return nil, err + } + items, err := meta.ExtractList(listObj) + if err != nil { + return nil, err + } + if len(items) == 0 { + // Nothing to delete, return now + return listObj, nil + } + // Spawn a number of goroutines, so that we can issue requests to storage + // in parallel to speed up deletion. + // It is proportional to the number of items to delete, up to + // DeleteCollectionWorkers (it doesn't make much sense to spawn 16 + // workers to delete 10 items). 
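The comment above describes the bounded fan-out that the following hunk implements. Here is the same pattern in isolation as a plain-Go sketch, with the utilruntime crash handling omitted and a made-up deleteOne standing in for the per-item e.Delete call.

    package main

    import (
        "fmt"
        "sync"
    )

    // deleteOne is a stand-in for the per-item delete issued against storage.
    func deleteOne(name string) error { fmt.Println("deleting", name); return nil }

    func main() {
        items := []string{"a", "b", "c", "d"}
        workers := 16
        if workers > len(items) {
            workers = len(items) // no point spawning more workers than items
        }

        toProcess := make(chan string, 2*workers)
        errs := make(chan error, workers)

        // Distributor: feed work, then close the channel so workers drain and exit.
        go func() {
            for _, name := range items {
                toProcess <- name
            }
            close(toProcess)
        }()

        var wg sync.WaitGroup
        wg.Add(workers)
        for i := 0; i < workers; i++ {
            go func() {
                defer wg.Done()
                for name := range toProcess {
                    if err := deleteOne(name); err != nil {
                        errs <- err
                        return
                    }
                }
            }()
        }
        wg.Wait()

        // Report the first error, if any worker recorded one.
        select {
        case err := <-errs:
            fmt.Println("first error:", err)
        default:
            fmt.Println("all deleted")
        }
    }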
+ workersNumber := e.DeleteCollectionWorkers + if workersNumber > len(items) { + workersNumber = len(items) + } + if workersNumber < 1 { + workersNumber = 1 + } + wg := sync.WaitGroup{} + toProcess := make(chan int, 2*workersNumber) + errs := make(chan error, workersNumber+1) + + go func() { + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason) + }) + for i := 0; i < len(items); i++ { + toProcess <- i + } + close(toProcess) + }() + + wg.Add(workersNumber) + for i := 0; i < workersNumber; i++ { + go func() { + // panics don't cross goroutine boundaries + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason) + }) + defer wg.Done() + + for index := range toProcess { + accessor, err := meta.Accessor(items[index]) + if err != nil { + errs <- err + return + } + if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) { + klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) + errs <- err + return + } + } + }() + } + wg.Wait() + select { + case err := <-errs: + return nil, err + default: + return listObj, nil + } +} + +// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and +// returns the decorated deleted object if appropriate. +func (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool) (runtime.Object, error) { + if runHooks && e.AfterDelete != nil { + if err := e.AfterDelete(obj); err != nil { + return nil, err + } + } + if e.ReturnDeletedObject { + if e.Decorator != nil { + if err := e.Decorator(obj); err != nil { + return nil, err + } + } + return obj, nil + } + // Return information about the deleted object, which enables clients to + // verify that the object was actually deleted and not waiting for finalizers. + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + qualifiedResource := e.qualifiedResourceFromContext(ctx) + details := &metav1.StatusDetails{ + Name: accessor.GetName(), + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, // Yes we set Kind field to resource. + UID: accessor.GetUID(), + } + status := &metav1.Status{Status: metav1.StatusSuccess, Details: details} + return status, nil +} + +// Watch makes a matcher for the given label and field, and calls +// WatchPredicate. If possible, you should customize PredicateFunc to produce +// a matcher that matches by key. SelectionPredicate does this for you +// automatically. +func (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + predicate := e.PredicateFunc(label, field) + + resourceVersion := "" + if options != nil { + resourceVersion = options.ResourceVersion + predicate.AllowWatchBookmarks = options.AllowWatchBookmarks + } + return e.WatchPredicate(ctx, predicate, resourceVersion) +} + +// WatchPredicate starts a watch for the items that matches. 
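A hedged sketch of how a caller might drive Watch, and therefore WatchPredicate: the watcher interface, selectors, and names here are assumptions, while the option fields are the real internalversion ListOptions ones. Pinning metadata.name in the field selector is what lets MatchesSingle reduce the watch to a single storage key rather than the whole prefix.

    package example

    import (
        "context"

        metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
        "k8s.io/apimachinery/pkg/fields"
        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/apimachinery/pkg/watch"
    )

    // watcher is the subset of the Store surface this sketch needs.
    type watcher interface {
        Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error)
    }

    func watchDefaultRouter(ctx context.Context, store watcher) (watch.Interface, error) {
        opts := &metainternalversion.ListOptions{
            // Both selectors end up in the SelectionPredicate built by PredicateFunc.
            LabelSelector: labels.SelectorFromSet(labels.Set{"app": "router"}),
            FieldSelector: fields.OneTermEqualSelector("metadata.name", "default"),
            // Start from any cached version and accept bookmark events.
            ResourceVersion:     "0",
            AllowWatchBookmarks: true,
        }
        return store.Watch(ctx, opts)
    }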
+func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) { + storageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p} + if name, ok := p.MatchesSingle(); ok { + if key, err := e.KeyFunc(ctx, name); err == nil { + w, err := e.Storage.Watch(ctx, key, storageOpts) + if err != nil { + return nil, err + } + if e.Decorator != nil { + return newDecoratedWatcher(w, e.Decorator), nil + } + return w, nil + } + // if we cannot extract a key based on the current context, the + // optimization is skipped + } + + w, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts) + if err != nil { + return nil, err + } + if e.Decorator != nil { + return newDecoratedWatcher(w, e.Decorator), nil + } + return w, nil +} + +// calculateTTL is a helper for retrieving the updated TTL for an object or +// returning an error if the TTL cannot be calculated. The defaultTTL is +// changed to 1 if less than zero. Zero means no TTL, not expire immediately. +func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) { + // TODO: validate this is assertion is still valid. + + // etcd may return a negative TTL for a node if the expiration has not + // occurred due to server lag - we will ensure that the value is at least + // set. + if defaultTTL < 0 { + defaultTTL = 1 + } + ttl = uint64(defaultTTL) + if e.TTLFunc != nil { + ttl, err = e.TTLFunc(obj, ttl, update) + } + return ttl, err +} + +// exportObjectMeta unsets the fields on the given object that should not be +// present when the object is exported. +func exportObjectMeta(accessor metav1.Object, exact bool) { + accessor.SetUID("") + if !exact { + accessor.SetNamespace("") + } + accessor.SetCreationTimestamp(metav1.Time{}) + accessor.SetDeletionTimestamp(nil) + accessor.SetResourceVersion("") + accessor.SetSelfLink("") + if len(accessor.GetGenerateName()) > 0 && !exact { + accessor.SetName("") + } +} + +// Export implements the rest.Exporter interface +func (e *Store) Export(ctx context.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) { + obj, err := e.Get(ctx, name, &metav1.GetOptions{}) + if err != nil { + return nil, err + } + if accessor, err := meta.Accessor(obj); err == nil { + exportObjectMeta(accessor, opts.Exact) + } else { + klog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err) + } + + if e.ExportStrategy != nil { + if err = e.ExportStrategy.Export(ctx, obj, opts.Exact); err != nil { + return nil, err + } + } else { + e.CreateStrategy.PrepareForCreate(ctx, obj) + } + return obj, nil +} + +// CompleteWithOptions updates the store with the provided options and +// defaults common fields. 
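For orientation, a hedged sketch of the constructor shape that typically feeds CompleteWithOptions. Everything resource-specific (newFunc, newListFunc, the strategies, the options getter) is left as a parameter rather than tied to any real registry, and the TableConvertor line reflects the requirement CompleteWithOptions enforces just below.

    package example

    import (
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apiserver/pkg/registry/generic"
        genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
        "k8s.io/apiserver/pkg/registry/rest"
    )

    // newStore wires a generic Store for the given resource and lets
    // CompleteWithOptions default the key functions, storage, and
    // DeleteCollectionWorkers.
    func newStore(
        gr schema.GroupResource,
        newFunc, newListFunc func() runtime.Object,
        createStrategy rest.RESTCreateStrategy,
        updateStrategy rest.RESTUpdateStrategy,
        deleteStrategy rest.RESTDeleteStrategy,
        optsGetter generic.RESTOptionsGetter,
    ) (*genericregistry.Store, error) {
        store := &genericregistry.Store{
            NewFunc:                  newFunc,
            NewListFunc:              newListFunc,
            DefaultQualifiedResource: gr,
            CreateStrategy:           createStrategy,
            UpdateStrategy:           updateStrategy,
            DeleteStrategy:           deleteStrategy,
            // CompleteWithOptions rejects a store without a TableConvertor; the
            // default name/creation-time table is enough for a sketch.
            TableConvertor: rest.NewDefaultTableConvertor(gr),
        }
        if err := store.CompleteWithOptions(&generic.StoreOptions{RESTOptions: optsGetter}); err != nil {
            return nil, err
        }
        return store, nil
    }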
+func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { + if e.DefaultQualifiedResource.Empty() { + return fmt.Errorf("store %#v must have a non-empty qualified resource", e) + } + if e.NewFunc == nil { + return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String()) + } + if e.NewListFunc == nil { + return fmt.Errorf("store for %s must have NewListFunc set", e.DefaultQualifiedResource.String()) + } + if (e.KeyRootFunc == nil) != (e.KeyFunc == nil) { + return fmt.Errorf("store for %s must set both KeyRootFunc and KeyFunc or neither", e.DefaultQualifiedResource.String()) + } + + if e.TableConvertor == nil { + return fmt.Errorf("store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time", e.DefaultQualifiedResource.String()) + } + + var isNamespaced bool + switch { + case e.CreateStrategy != nil: + isNamespaced = e.CreateStrategy.NamespaceScoped() + case e.UpdateStrategy != nil: + isNamespaced = e.UpdateStrategy.NamespaceScoped() + default: + return fmt.Errorf("store for %s must have CreateStrategy or UpdateStrategy set", e.DefaultQualifiedResource.String()) + } + + if e.DeleteStrategy == nil { + return fmt.Errorf("store for %s must have DeleteStrategy set", e.DefaultQualifiedResource.String()) + } + + if options.RESTOptions == nil { + return fmt.Errorf("options for %s must have RESTOptions set", e.DefaultQualifiedResource.String()) + } + + attrFunc := options.AttrFunc + if attrFunc == nil { + if isNamespaced { + attrFunc = storage.DefaultNamespaceScopedAttr + } else { + attrFunc = storage.DefaultClusterScopedAttr + } + } + if e.PredicateFunc == nil { + e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: attrFunc, + } + } + } + + err := validateIndexers(options.Indexers) + if err != nil { + return err + } + + opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource) + if err != nil { + return err + } + + // ResourcePrefix must come from the underlying factory + prefix := opts.ResourcePrefix + if !strings.HasPrefix(prefix, "/") { + prefix = "/" + prefix + } + if prefix == "/" { + return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix) + } + + // Set the default behavior for storage key generation + if e.KeyRootFunc == nil && e.KeyFunc == nil { + if isNamespaced { + e.KeyRootFunc = func(ctx context.Context) string { + return NamespaceKeyRootFunc(ctx, prefix) + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NamespaceKeyFunc(ctx, prefix, name) + } + } else { + e.KeyRootFunc = func(ctx context.Context) string { + return prefix + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NoNamespaceKeyFunc(ctx, prefix, name) + } + } + } + + // We adapt the store's keyFunc so that we can use it with the StorageDecorator + // without making any assumptions about where objects are stored in etcd + keyFunc := func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if isNamespaced { + return e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) + } + + return e.KeyFunc(genericapirequest.NewContext(), accessor.GetName()) + } + + if e.DeleteCollectionWorkers == 0 { + 
e.DeleteCollectionWorkers = opts.DeleteCollectionWorkers + } + + e.EnableGarbageCollection = opts.EnableGarbageCollection + + if e.ObjectNameFunc == nil { + e.ObjectNameFunc = func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil + } + } + + if e.Storage.Storage == nil { + e.Storage.Codec = opts.StorageConfig.Codec + var err error + e.Storage.Storage, e.DestroyFunc, err = opts.Decorator( + opts.StorageConfig, + prefix, + keyFunc, + e.NewFunc, + e.NewListFunc, + attrFunc, + options.TriggerFunc, + options.Indexers, + ) + if err != nil { + return err + } + e.StorageVersioner = opts.StorageConfig.EncodeVersioner + + if opts.CountMetricPollPeriod > 0 { + stopFunc := e.startObservingCount(opts.CountMetricPollPeriod) + previousDestroy := e.DestroyFunc + e.DestroyFunc = func() { + stopFunc() + if previousDestroy != nil { + previousDestroy() + } + } + } + } + + return nil +} + +// startObservingCount starts monitoring given prefix and periodically updating metrics. It returns a function to stop collection. +func (e *Store) startObservingCount(period time.Duration) func() { + prefix := e.KeyRootFunc(genericapirequest.NewContext()) + resourceName := e.DefaultQualifiedResource.String() + klog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) + stopCh := make(chan struct{}) + go wait.JitterUntil(func() { + count, err := e.Storage.Count(prefix) + if err != nil { + klog.V(5).Infof("Failed to update storage count metric: %v", err) + metrics.UpdateObjectCount(resourceName, -1) + } else { + metrics.UpdateObjectCount(resourceName, count) + } + }, period, resourceCountPollPeriodJitter, true, stopCh) + return func() { close(stopCh) } +} + +func (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { + if e.TableConvertor != nil { + return e.TableConvertor.ConvertToTable(ctx, object, tableOptions) + } + return rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions) +} + +func (e *Store) StorageVersion() runtime.GroupVersioner { + return e.StorageVersioner +} + +// validateIndexers will check the prefix of indexers. +func validateIndexers(indexers *cache.Indexers) error { + if indexers == nil { + return nil + } + for indexName := range *indexers { + if len(indexName) <= 2 || (indexName[:2] != "l:" && indexName[:2] != "f:") { + return fmt.Errorf("index must prefix with \"l:\" or \"f:\"") + } + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go new file mode 100644 index 000000000..e0ca2df04 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/tools/cache" +) + +// StorageDecorator is a function signature for producing a storage.Interface +// and an associated DestroyFunc from given parameters. +type StorageDecorator func( + config *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) + +// UndecoratedStorage returns the given a new storage from the given config +// without any decoration. +func UndecoratedStorage( + config *storagebackend.Config, + resourcePrefix string, + keyFunc func(obj runtime.Object) (string, error), + newFunc func() runtime.Object, + newListFunc func() runtime.Object, + getAttrsFunc storage.AttrFunc, + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { + return NewRawStorage(config, newFunc) +} + +// NewRawStorage creates the low level kv storage. This is a work-around for current +// two layer of same storage interface. +// TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method. +func NewRawStorage(config *storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { + return factory.Create(*config, newFunc) +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS new file mode 100644 index 000000000..55e617818 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- justinsb +- ncdc +- dims +- hongchaodeng +- krousey +- ingvagabund +- jianhuiz +- sdminonne +- enj diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go new file mode 100644 index 000000000..dd70a4eb7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go @@ -0,0 +1,192 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + genericvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/api/validation/path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage/names" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +// RESTCreateStrategy defines the minimum validation, accepted input, and +// name generation behavior to create an object that follows Kubernetes +// API conventions. +type RESTCreateStrategy interface { + runtime.ObjectTyper + // The name generator is used when the standard GenerateName field is set. + // The NameGenerator will be invoked prior to validation. + names.NameGenerator + + // NamespaceScoped returns true if the object must be within a namespace. + NamespaceScoped() bool + // PrepareForCreate is invoked on create before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + // + // Often implemented as a type check and an initailization or clearing of + // status. Clear the status because status changes are internal. External + // callers of an api (users) should not be setting an initial status on + // newly created objects. + PrepareForCreate(ctx context.Context, obj runtime.Object) + // Validate returns an ErrorList with validation errors or nil. Validate + // is invoked after default fields in the object have been filled in + // before the object is persisted. This method should not mutate the + // object. + Validate(ctx context.Context, obj runtime.Object) field.ErrorList + // Canonicalize allows an object to be mutated into a canonical form. This + // ensures that code that operates on these objects can rely on the common + // form for things like comparison. Canonicalize is invoked after + // validation has succeeded but before the object has been persisted. + // This method may mutate the object. Often implemented as a type check or + // empty method. + Canonicalize(obj runtime.Object) +} + +// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns +// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate. +// It returns nil if the object should be created. 
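A minimal, hypothetical implementation of the strategy interface that BeforeCreate drives: widgetStrategy and its no-op hooks are invented, while the embedded ObjectTyper/NameGenerator and the method set mirror the interface defined above. Real strategies embed the scheme as the ObjectTyper and names.SimpleNameGenerator.

    package example

    import (
        "context"

        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/util/validation/field"
        "k8s.io/apiserver/pkg/registry/rest"
        "k8s.io/apiserver/pkg/storage/names"
    )

    // widgetStrategy is a hypothetical minimal create strategy.
    type widgetStrategy struct {
        runtime.ObjectTyper
        names.NameGenerator
    }

    var _ rest.RESTCreateStrategy = widgetStrategy{}

    func (widgetStrategy) NamespaceScoped() bool { return true }

    // PrepareForCreate would clear user-unsettable fields such as status.
    func (widgetStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {}

    // Validate returns an empty list when the object is acceptable.
    func (widgetStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
        return field.ErrorList{}
    }

    // Canonicalize would normalize order-insensitive fields before persisting.
    func (widgetStrategy) Canonicalize(obj runtime.Object) {}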
+func BeforeCreate(strategy RESTCreateStrategy, ctx context.Context, obj runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if strategy.NamespaceScoped() { + if !ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else if len(objectMeta.GetNamespace()) > 0 { + objectMeta.SetNamespace(metav1.NamespaceNone) + } + objectMeta.SetDeletionTimestamp(nil) + objectMeta.SetDeletionGracePeriodSeconds(nil) + strategy.PrepareForCreate(ctx, obj) + FillObjectMetaSystemFields(objectMeta) + if len(objectMeta.GetGenerateName()) > 0 && len(objectMeta.GetName()) == 0 { + objectMeta.SetName(strategy.GenerateName(objectMeta.GetGenerateName())) + } + + // Ensure managedFields is not set unless the feature is enabled + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + objectMeta.SetManagedFields(nil) + } + + // ClusterName is ignored and should not be saved + if len(objectMeta.GetClusterName()) > 0 { + objectMeta.SetClusterName("") + } + + if errs := strategy.Validate(ctx, obj); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + // Custom validation (including name validation) passed + // Now run common validation on object meta + // Do this *after* custom validation so that specific error messages are shown whenever possible + if errs := genericvalidation.ValidateObjectMetaAccessor(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// CheckGeneratedNameError checks whether an error that occurred creating a resource is due +// to generation being unable to pick a valid name. +func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { + if !errors.IsAlreadyExists(err) { + return err + } + + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if len(objectMeta.GetGenerateName()) == 0 { + return err + } + + return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) +} + +// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. +func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (metav1.Object, schema.GroupVersionKind, error) { + objectMeta, err := meta.Accessor(obj) + if err != nil { + return nil, schema.GroupVersionKind{}, errors.NewInternalError(err) + } + kinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, schema.GroupVersionKind{}, errors.NewInternalError(err) + } + return objectMeta, kinds[0], nil +} + +// NamespaceScopedStrategy has a method to tell if the object must be in a namespace. +type NamespaceScopedStrategy interface { + // NamespaceScoped returns if the object must be in a namespace. 
+ NamespaceScoped() bool +} + +// AdmissionToValidateObjectFunc converts validating admission to a rest validate object func +func AdmissionToValidateObjectFunc(admit admission.Interface, staticAttributes admission.Attributes, o admission.ObjectInterfaces) ValidateObjectFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(ctx context.Context, obj runtime.Object) error { return nil } + } + return func(ctx context.Context, obj runtime.Object) error { + name := staticAttributes.GetName() + // in case the generated name is populated + if len(name) == 0 { + if metadata, err := meta.Accessor(obj); err == nil { + name = metadata.GetName() + } + } + + finalAttributes := admission.NewAttributesRecord( + obj, + staticAttributes.GetOldObject(), + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + name, + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(ctx, finalAttributes, o) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go new file mode 100644 index 000000000..37d6c8f8a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create_update.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// RESTCreateUpdateStrategy is a union of RESTUpdateStrategy and RESTCreateStrategy, +// and it defines the minimum validation, accepted input, and name generation +// behavior to create and update an object that follows Kubernetes API conventions. +type RESTCreateUpdateStrategy interface { + RESTCreateStrategy + // AllowCreateOnUpdate returns true if the object can be created by a PUT. + AllowCreateOnUpdate() bool + // PrepareForUpdate is invoked on update before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + PrepareForUpdate(ctx context.Context, obj, old runtime.Object) + // ValidateUpdate is invoked after default fields in the object have been + // filled in before the object is persisted. This method should not mutate + // the object. + ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList + // AllowUnconditionalUpdate returns true if the object can be updated + // unconditionally (irrespective of the latest resource version), when + // there is no resource version specified in the object. 
+ AllowUnconditionalUpdate() bool +} + +// Ensure that RESTCreateUpdateStrategy extends RESTCreateStrategy +var _ RESTCreateStrategy = (RESTCreateUpdateStrategy)(nil) + +// Ensure that RESTCreateUpdateStrategy extends RESTUpdateStrategy +var _ RESTUpdateStrategy = (RESTCreateUpdateStrategy)(nil) diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go new file mode 100644 index 000000000..3e7ca85b7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes +// API conventions. +type RESTDeleteStrategy interface { + runtime.ObjectTyper +} + +type GarbageCollectionPolicy string + +const ( + DeleteDependents GarbageCollectionPolicy = "DeleteDependents" + OrphanDependents GarbageCollectionPolicy = "OrphanDependents" + // Unsupported means that the resource knows that it cannot be GC'd, so the finalizers + // should never be set in storage. + Unsupported GarbageCollectionPolicy = "Unsupported" +) + +// GarbageCollectionDeleteStrategy must be implemented by the registry that wants to +// orphan dependents by default. +type GarbageCollectionDeleteStrategy interface { + // DefaultGarbageCollectionPolicy returns the default garbage collection behavior. + DefaultGarbageCollectionPolicy(ctx context.Context) GarbageCollectionPolicy +} + +// RESTGracefulDeleteStrategy must be implemented by the registry that supports +// graceful deletion. +type RESTGracefulDeleteStrategy interface { + // CheckGracefulDelete should return true if the object can be gracefully deleted and set + // any default values on the DeleteOptions. + CheckGracefulDelete(ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) bool +} + +// BeforeDelete tests whether the object can be gracefully deleted. +// If graceful is set, the object should be gracefully deleted. If gracefulPending +// is set, the object has already been gracefully deleted (and the provided grace +// period is longer than the time to deletion). An error is returned if the +// condition cannot be checked or the gracePeriodSeconds is invalid. The options +// argument may be updated with default values if graceful is true. Second place +// where we set deletionTimestamp is pkg/registry/generic/registry/store.go. +// This function is responsible for setting deletionTimestamp during gracefulDeletion, +// other one for cascading deletions. 
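A hypothetical strategy fragment showing the hook that BeforeDelete consults; the 30-second default is only an example value (pods are the familiar real-world case), not something this patch prescribes.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apiserver/pkg/registry/rest"
    )

    // gracefulStrategy opts a resource into graceful deletion by implementing
    // RESTGracefulDeleteStrategy.
    type gracefulStrategy struct{}

    var _ rest.RESTGracefulDeleteStrategy = gracefulStrategy{}

    // CheckGracefulDelete defaults the grace period when the caller did not set
    // one and reports that graceful deletion applies to this object.
    func (gracefulStrategy) CheckGracefulDelete(ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) bool {
        if options.GracePeriodSeconds == nil {
            period := int64(30)
            options.GracePeriodSeconds = &period
        }
        return true
    }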
+func BeforeDelete(strategy RESTDeleteStrategy, ctx context.Context, obj runtime.Object, options *metav1.DeleteOptions) (graceful, gracefulPending bool, err error) { + objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return false, false, kerr + } + if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 { + return false, false, errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs) + } + // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too. + if options.Preconditions != nil { + if options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.GetUID() { + return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.GetName(), fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.GetUID())) + } + if options.Preconditions.ResourceVersion != nil && *options.Preconditions.ResourceVersion != objectMeta.GetResourceVersion() { + return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.GetName(), fmt.Errorf("the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). The object might have been modified", *options.Preconditions.ResourceVersion, objectMeta.GetResourceVersion())) + } + } + gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy) + if !ok { + // If we're not deleting gracefully there's no point in updating Generation, as we won't update + // the obcject before deleting it. + return false, false, nil + } + // if the object is already being deleted, no need to update generation. + if objectMeta.GetDeletionTimestamp() != nil { + // if we are already being deleted, we may only shorten the deletion grace period + // this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set, + // so we force deletion immediately + // IMPORTANT: + // The deletion operation happens in two phases. + // 1. Update to set DeletionGracePeriodSeconds and DeletionTimestamp + // 2. Delete the object from storage. + // If the update succeeds, but the delete fails (network error, internal storage error, etc.), + // a resource was previously left in a state that was non-recoverable. We + // check if the existing stored resource has a grace period as 0 and if so + // attempt to delete immediately in order to recover from this scenario. + if objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() == 0 { + return false, false, nil + } + // only a shorter grace period may be provided by a user + if options.GracePeriodSeconds != nil { + period := int64(*options.GracePeriodSeconds) + if period >= *objectMeta.GetDeletionGracePeriodSeconds() { + return false, true, nil + } + newDeletionTimestamp := metav1.NewTime( + objectMeta.GetDeletionTimestamp().Add(-time.Second * time.Duration(*objectMeta.GetDeletionGracePeriodSeconds())). 
+ Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.SetDeletionTimestamp(&newDeletionTimestamp) + objectMeta.SetDeletionGracePeriodSeconds(&period) + return true, false, nil + } + // graceful deletion is pending, do nothing + options.GracePeriodSeconds = objectMeta.GetDeletionGracePeriodSeconds() + return false, true, nil + } + + if !gracefulStrategy.CheckGracefulDelete(ctx, obj, options) { + return false, false, nil + } + now := metav1.NewTime(metav1.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.SetDeletionTimestamp(&now) + objectMeta.SetDeletionGracePeriodSeconds(options.GracePeriodSeconds) + // If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil. + // Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes. + // Thus we need to bump object's Generation (if set). This handles generation bump during graceful deletion. + // The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go. + if objectMeta.GetGeneration() > 0 { + objectMeta.SetGeneration(objectMeta.GetGeneration() + 1) + } + return true, false, nil +} + +// AdmissionToValidateObjectDeleteFunc returns a admission validate func for object deletion +func AdmissionToValidateObjectDeleteFunc(admit admission.Interface, staticAttributes admission.Attributes, objInterfaces admission.ObjectInterfaces) ValidateObjectFunc { + mutatingAdmission, isMutatingAdmission := admit.(admission.MutationInterface) + validatingAdmission, isValidatingAdmission := admit.(admission.ValidationInterface) + + mutating := isMutatingAdmission && mutatingAdmission.Handles(staticAttributes.GetOperation()) + validating := isValidatingAdmission && validatingAdmission.Handles(staticAttributes.GetOperation()) + + return func(ctx context.Context, old runtime.Object) error { + if !mutating && !validating { + return nil + } + finalAttributes := admission.NewAttributesRecord( + nil, + // Deep copy the object to avoid accidentally changing the object. + old.DeepCopyObject(), + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if mutating { + if err := mutatingAdmission.Admit(ctx, finalAttributes, objInterfaces); err != nil { + return err + } + } + if validating { + if err := validatingAdmission.Validate(ctx, finalAttributes, objInterfaces); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go b/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go new file mode 100644 index 000000000..20524d21f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package rest defines common logic around changes to Kubernetes-style resources. +package rest // import "k8s.io/apiserver/pkg/registry/rest" diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/export.go b/vendor/k8s.io/apiserver/pkg/registry/rest/export.go new file mode 100644 index 000000000..b3fd8af30 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/export.go @@ -0,0 +1,34 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// RESTExportStrategy is the interface that defines how to export a Kubernetes +// object. An exported object is stripped of non-user-settable fields and +// optionally, the identifying information related to the object's identity in +// the cluster so that it can be loaded into a different namespace or entirely +// different cluster without conflict. +type RESTExportStrategy interface { + // Export strips fields that can not be set by the user. If 'exact' is false + // fields specific to the cluster are also stripped + Export(ctx context.Context, obj runtime.Object, exact bool) error +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go b/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go new file mode 100644 index 000000000..add6044ab --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/meta.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta. +func FillObjectMetaSystemFields(meta metav1.Object) { + meta.SetCreationTimestamp(metav1.Now()) + meta.SetUID(uuid.NewUUID()) + meta.SetSelfLink("") +} + +// ValidNamespace returns false if the namespace on the context differs from +// the resource. If the resource has no namespace, it is set to the value in +// the context. 
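A small usage sketch of ValidNamespace's two outcomes, fill-in versus mismatch; the namespace and object names are made up.

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
        "k8s.io/apiserver/pkg/registry/rest"
    )

    func main() {
        ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "openshift-ingress")

        // Object without a namespace: ValidNamespace fills it in from the context.
        obj := &metav1.ObjectMeta{Name: "router"}
        fmt.Println(rest.ValidNamespace(ctx, obj), obj.Namespace) // true openshift-ingress

        // Object whose namespace disagrees with the request context: rejected.
        other := &metav1.ObjectMeta{Name: "router", Namespace: "default"}
        fmt.Println(rest.ValidNamespace(ctx, other)) // false
    }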
+func ValidNamespace(ctx context.Context, resource metav1.Object) bool { + ns, ok := genericapirequest.NamespaceFrom(ctx) + if len(resource.GetNamespace()) == 0 { + resource.SetNamespace(ns) + } + return ns == resource.GetNamespace() && ok +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go new file mode 100644 index 000000000..8f9c981dd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -0,0 +1,351 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "io" + "net/http" + "net/url" + + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +//TODO: +// Storage interfaces need to be separated into two groups; those that operate +// on collections and those that operate on individually named items. +// Collection interfaces: +// (Method: Current -> Proposed) +// GET: Lister -> CollectionGetter +// WATCH: Watcher -> CollectionWatcher +// CREATE: Creater -> CollectionCreater +// DELETE: (n/a) -> CollectionDeleter +// UPDATE: (n/a) -> CollectionUpdater +// +// Single item interfaces: +// (Method: Current -> Proposed) +// GET: Getter -> NamedGetter +// WATCH: (n/a) -> NamedWatcher +// CREATE: (n/a) -> NamedCreater +// DELETE: Deleter -> NamedDeleter +// UPDATE: Update -> NamedUpdater + +// Storage is a generic interface for RESTful storage services. +// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected +// that objects may implement any of the below interfaces. +type Storage interface { + // New returns an empty object that can be used with Create and Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object +} + +// Scoper indicates what scope the resource is at. It must be specified. +// It is usually provided automatically based on your strategy. +type Scoper interface { + // NamespaceScoped returns true if the storage is namespaced + NamespaceScoped() bool +} + +// KindProvider specifies a different kind for its API than for its internal storage. This is necessary for external +// objects that are not compiled into the api server. For such objects, there is no in-memory representation for +// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of +// API discovery we want to present the specific kind, not the generic internal representation. +type KindProvider interface { + Kind() string +} + +// ShortNamesProvider is an interface for RESTful storage services. Delivers a list of short names for a resource. The list is used by kubectl to have short names representation of resources. 
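A hypothetical example of opting a storage type into short names; RouteStorage and the "rt" alias are invented for illustration, and the embedded generic Store is assumed to be completed elsewhere.

    package example

    import (
        genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
        "k8s.io/apiserver/pkg/registry/rest"
    )

    // RouteStorage implements ShortNamesProvider alongside the generic Store so
    // that discovery advertises the alias to kubectl.
    type RouteStorage struct {
        *genericregistry.Store
    }

    var _ rest.ShortNamesProvider = &RouteStorage{}

    // ShortNames lets "kubectl get rt" resolve to this resource.
    func (*RouteStorage) ShortNames() []string {
        return []string{"rt"}
    }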
+type ShortNamesProvider interface { + ShortNames() []string +} + +// CategoriesProvider allows a resource to specify which groups of resources (categories) it's part of. Categories can +// be used by API clients to refer to a batch of resources by using a single name (e.g. "all" could translate to "pod,rc,svc,..."). +type CategoriesProvider interface { + Categories() []string +} + +// GroupVersionKindProvider is used to specify a particular GroupVersionKind to discovery. This is used for polymorphic endpoints +// which generally point to foreign versions. Scale refers to Scale.v1beta1.extensions for instance. +// This trumps KindProvider since it is capable of providing the information required. +// TODO KindProvider (only used by federation) should be removed and replaced with this, but that presents greater risk late in 1.8. +type GroupVersionKindProvider interface { + GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind +} + +// Lister is an object that can retrieve resources that match the provided field and label criteria. +type Lister interface { + // NewList returns an empty object that can be used with the List call. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + NewList() runtime.Object + // List selects resources in the storage which match to the selector. 'options' can be nil. + List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) + // TableConvertor ensures all list implementers also implement table conversion + TableConvertor +} + +// Exporter is an object that knows how to strip a RESTful resource for export. A store should implement this interface +// if export is generally supported for that type. Errors can still be returned during the actual Export when certain +// instances of the type are not exportable. +type Exporter interface { + // Export an object. Fields that are not user specified (e.g. Status, ObjectMeta.ResourceVersion) are stripped out + // Returns the stripped object. If 'exact' is true, fields that are specific to the cluster (e.g. namespace) are + // retained, otherwise they are stripped also. + Export(ctx context.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) +} + +// Getter is an object that can retrieve a named RESTful resource. +type Getter interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) +} + +// GetterWithOptions is an object that retrieve a named RESTful resource and takes +// additional options on the get request. It allows a caller to also receive the +// subpath of the GET request. +type GetterWithOptions interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // The options object passed to it is of the same type returned by the NewGetOptions + // method. + // TODO: Pass metav1.GetOptions. + Get(ctx context.Context, name string, options runtime.Object) (runtime.Object, error) + + // NewGetOptions returns an empty options object that will be used to pass + // options to the Get method. 
It may return a bool and a string, if true, the + // value of the request path below the object will be included as the named + // string in the serialization of the runtime object. E.g., returning "path" + // will convert the trailing request scheme value to "path" in the map[string][]string + // passed to the converter. + NewGetOptions() (runtime.Object, bool, string) +} + +type TableConvertor interface { + ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) +} + +// GracefulDeleter knows how to pass deletion options to allow delayed deletion of a +// RESTful object. +type GracefulDeleter interface { + // Delete finds a resource in the storage and deletes it. + // The delete attempt is validated by the deleteValidation first. + // If options are provided, the resource will attempt to honor them or return an invalid + // request error. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // Delete *may* return the object that was deleted, or a status object indicating additional + // information about deletion. + // It also returns a boolean which is set to true if the resource was instantly + // deleted or false if it will be deleted asynchronously. + Delete(ctx context.Context, name string, deleteValidation ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) +} + +// MayReturnFullObjectDeleter may return deleted object (instead of a simple status) on deletion. +type MayReturnFullObjectDeleter interface { + DeleteReturnsDeletedObject() bool +} + +// CollectionDeleter is an object that can delete a collection +// of RESTful resources. +type CollectionDeleter interface { + // DeleteCollection selects all resources in the storage matching given 'listOptions' + // and deletes them. The delete attempt is validated by the deleteValidation first. + // If 'options' are provided, the resource will attempt to honor them or return an + // invalid request error. + // DeleteCollection may not be atomic - i.e. it may delete some objects and still + // return an error after it. On success, returns a list of deleted objects. + DeleteCollection(ctx context.Context, deleteValidation ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) +} + +// Creater is an object that can create an instance of a RESTful object. +type Creater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. + Create(ctx context.Context, obj runtime.Object, createValidation ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) +} + +// NamedCreater is an object that can create an instance of a RESTful object using a name parameter. +type NamedCreater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. It expects a name parameter from the path. + // This is needed for create operations on subresources which include the name of the parent + // resource in the path. 
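Continuing the hypothetical widgetStorage sketch from above, the following illustrates a Getter that honors the contract described in the Getter comment: when the object is missing it returns an error for which IsNotFound(err) is true. The example.io group and widgets resource names are made up.

package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/registry/rest"
)

var _ rest.Getter = &widgetStorage{}

// Get returns a deep copy so callers cannot mutate the stored object, and a
// NotFound error (apierrors.IsNotFound(err) == true) when the name is unknown.
func (s *widgetStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	if w, ok := s.widgets[name]; ok {
		return w.DeepCopyObject(), nil
	}
	return nil, apierrors.NewNotFound(schema.GroupResource{Group: "example.io", Resource: "widgets"}, name)
}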
+ Create(ctx context.Context, name string, obj runtime.Object, createValidation ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) +} + +// UpdatedObjectInfo provides information about an updated object to an Updater. +// It requires access to the old object in order to return the newly updated object. +type UpdatedObjectInfo interface { + // Returns preconditions built from the updated object, if applicable. + // May return nil, or a preconditions object containing nil fields, + // if no preconditions can be determined from the updated object. + Preconditions() *metav1.Preconditions + + // UpdatedObject returns the updated object, given a context and old object. + // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). + UpdatedObject(ctx context.Context, oldObj runtime.Object) (newObj runtime.Object, err error) +} + +// ValidateObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may NOT transform the provided +// object. +type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error + +// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc. +func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error { + return nil +} + +// ValidateObjectUpdateFunc is a function to act on a given object and its predecessor. +// An error may be returned if the hook cannot be completed. An UpdateObjectFunc +// may NOT transform the provided object. +type ValidateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) error + +// ValidateAllObjectUpdateFunc is a "admit everything" instance of ValidateObjectUpdateFunc. +func ValidateAllObjectUpdateFunc(ctx context.Context, obj, old runtime.Object) error { + return nil +} + +// Updater is an object that can update an instance of a RESTful object. +type Updater interface { + // New returns an empty object that can be used with Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Update finds a resource in the storage and updates it. Some implementations + // may allow updates creates the object - they should set the created boolean + // to true. + Update(ctx context.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) +} + +// CreaterUpdater is a storage object that must support both create and update. +// Go prevents embedded interfaces that implement the same method. +type CreaterUpdater interface { + Creater + Update(ctx context.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) +} + +// CreaterUpdater must satisfy the Updater interface. +var _ Updater = CreaterUpdater(nil) + +// Patcher is a storage object that supports both get and update. +type Patcher interface { + Getter + Updater +} + +// Watcher should be implemented by all Storage objects that +// want to offer the ability to watch for changes through the watch api. +type Watcher interface { + // 'label' selects on labels; 'field' selects on the object's fields. 
Not all fields + // are supported; an error should be returned if 'field' tries to select on a field that + // isn't supported. 'resourceVersion' allows for continuing/starting a watch at a + // particular version. + Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) +} + +// StandardStorage is an interface covering the common verbs. Provided for testing whether a +// resource satisfies the normal storage methods. Use Storage when passing opaque storage objects. +type StandardStorage interface { + Getter + Lister + CreaterUpdater + GracefulDeleter + CollectionDeleter + Watcher +} + +// Redirector know how to return a remote resource's location. +type Redirector interface { + // ResourceLocation should return the remote location of the given resource, and an optional transport to use to request it, or an error. + ResourceLocation(ctx context.Context, id string) (remoteLocation *url.URL, transport http.RoundTripper, err error) +} + +// Responder abstracts the normal response behavior for a REST method and is passed to callers that +// may wish to handle the response directly in some cases, but delegate to the normal error or object +// behavior in other cases. +type Responder interface { + // Object writes the provided object to the response. Invoking this method multiple times is undefined. + Object(statusCode int, obj runtime.Object) + // Error writes the provided error to the response. This method may only be invoked once. + Error(err error) +} + +// Connecter is a storage object that responds to a connection request. +type Connecter interface { + // Connect returns an http.Handler that will handle the request/response for a given API invocation. + // The provided responder may be used for common API responses. The responder will write both status + // code and body, so the ServeHTTP method should exit after invoking the responder. The Handler will + // be used for a single API request and then discarded. The Responder is guaranteed to write to the + // same http.ResponseWriter passed to ServeHTTP. + Connect(ctx context.Context, id string, options runtime.Object, r Responder) (http.Handler, error) + + // NewConnectOptions returns an empty options object that will be used to pass + // options to the Connect method. If nil, then a nil options object is passed to + // Connect. It may return a bool and a string. If true, the value of the request + // path below the object will be included as the named string in the serialization + // of the runtime object. + NewConnectOptions() (runtime.Object, bool, string) + + // ConnectMethods returns the list of HTTP methods handled by Connect + ConnectMethods() []string +} + +// ResourceStreamer is an interface implemented by objects that prefer to be streamed from the server +// instead of decoded directly. +type ResourceStreamer interface { + // InputStream should return an io.ReadCloser if the provided object supports streaming. The desired + // api version and an accept header (may be empty) are passed to the call. If no error occurs, + // the caller may return a flag indicating whether the result should be flushed as writes occur + // and a content type string that indicates the type of the stream. + // If a null stream is returned, a StatusNoContent response wil be generated. 
+ InputStream(ctx context.Context, apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, mimeType string, err error) +} + +// StorageMetadata is an optional interface that callers can implement to provide additional +// information about their Storage objects. +type StorageMetadata interface { + // ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE, + // PATCH) can respond with. + ProducesMIMETypes(verb string) []string + + // ProducesObject returns an object the specified HTTP verb respond with. It will overwrite storage object if + // it is not nil. Only the type of the return object matters, the value will be ignored. + ProducesObject(verb string) interface{} +} + +// StorageVersionProvider is an optional interface that a storage object can +// implement if it wishes to disclose its storage version. +type StorageVersionProvider interface { + // StorageVersion returns a group versioner, which will outputs the gvk + // an object will be converted to before persisted in etcd, given a + // list of kinds the object might belong to. + StorageVersion() runtime.GroupVersioner +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/table.go b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go new file mode 100644 index 000000000..d90ae7076 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go @@ -0,0 +1,107 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +type defaultTableConvertor struct { + defaultQualifiedResource schema.GroupResource +} + +// NewDefaultTableConvertor creates a default convertor; the provided resource is used for error messages +// if no resource info can be determined from the context passed to ConvertToTable. 
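Because rest.Lister embeds TableConvertor, a storage type needs a table implementation before it can serve lists. A common approach, sketched below with placeholder group/resource names, is to embed the default convertor defined just below, which renders the generic Name/Created At columns.

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/registry/rest"
)

// widgetTables embeds the default convertor so list endpoints can serve
// kubectl-style Name / Created At tables without any custom columns.
type widgetTables struct {
	rest.TableConvertor
}

func newWidgetTables() widgetTables {
	return widgetTables{
		TableConvertor: rest.NewDefaultTableConvertor(schema.GroupResource{Group: "example.io", Resource: "widgets"}),
	}
}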
+func NewDefaultTableConvertor(defaultQualifiedResource schema.GroupResource) TableConvertor { + return defaultTableConvertor{defaultQualifiedResource: defaultQualifiedResource} +} + +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + +func (c defaultTableConvertor) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { + var table metav1.Table + fn := func(obj runtime.Object) error { + m, err := meta.Accessor(obj) + if err != nil { + resource := c.defaultQualifiedResource + if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { + resource = schema.GroupResource{Group: info.APIGroup, Resource: info.Resource} + } + return errNotAcceptable{resource: resource} + } + table.Rows = append(table.Rows, metav1.TableRow{ + Cells: []interface{}{m.GetName(), m.GetCreationTimestamp().Time.UTC().Format(time.RFC3339)}, + Object: runtime.RawExtension{Object: obj}, + }) + return nil + } + switch { + case meta.IsListType(object): + if err := meta.EachListItem(object, fn); err != nil { + return nil, err + } + default: + if err := fn(object); err != nil { + return nil, err + } + } + if m, err := meta.ListAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + table.Continue = m.GetContinue() + table.RemainingItemCount = m.GetRemainingItemCount() + } else { + if m, err := meta.CommonAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + } + } + if opt, ok := tableOptions.(*metav1.TableOptions); !ok || !opt.NoHeaders { + table.ColumnDefinitions = []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, + {Name: "Created At", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"]}, + } + } + return &table, nil +} + +// errNotAcceptable indicates the resource doesn't support Table conversion +type errNotAcceptable struct { + resource schema.GroupResource +} + +func (e errNotAcceptable) Error() string { + return fmt.Sprintf("the resource %s does not support being converted to a Table", e.resource) +} + +func (e errNotAcceptable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReason("NotAcceptable"), + Message: e.Error(), + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/update.go b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go new file mode 100644 index 000000000..0741b84ec --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go @@ -0,0 +1,279 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + genericvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/api/validation/path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +// RESTUpdateStrategy defines the minimum validation, accepted input, and +// name generation behavior to update an object that follows Kubernetes +// API conventions. A resource may have many UpdateStrategies, depending on +// the call pattern in use. +type RESTUpdateStrategy interface { + runtime.ObjectTyper + // NamespaceScoped returns true if the object must be within a namespace. + NamespaceScoped() bool + // AllowCreateOnUpdate returns true if the object can be created by a PUT. + AllowCreateOnUpdate() bool + // PrepareForUpdate is invoked on update before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + PrepareForUpdate(ctx context.Context, obj, old runtime.Object) + // ValidateUpdate is invoked after default fields in the object have been + // filled in before the object is persisted. This method should not mutate + // the object. + ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList + // Canonicalize allows an object to be mutated into a canonical form. This + // ensures that code that operates on these objects can rely on the common + // form for things like comparison. Canonicalize is invoked after + // validation has succeeded but before the object has been persisted. + // This method may mutate the object. + Canonicalize(obj runtime.Object) + // AllowUnconditionalUpdate returns true if the object can be updated + // unconditionally (irrespective of the latest resource version), when + // there is no resource version specified in the object. + AllowUnconditionalUpdate() bool +} + +// TODO: add other common fields that require global validation. +func validateCommonFields(obj, old runtime.Object, strategy RESTUpdateStrategy) (field.ErrorList, error) { + allErrs := field.ErrorList{} + objectMeta, err := meta.Accessor(obj) + if err != nil { + return nil, fmt.Errorf("failed to get new object metadata: %v", err) + } + oldObjectMeta, err := meta.Accessor(old) + if err != nil { + return nil, fmt.Errorf("failed to get old object metadata: %v", err) + } + allErrs = append(allErrs, genericvalidation.ValidateObjectMetaAccessor(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata"))...) + allErrs = append(allErrs, genericvalidation.ValidateObjectMetaAccessorUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...) + + return allErrs, nil +} + +// BeforeUpdate ensures that common operations for all resources are performed on update. It only returns +// errors that can be converted to api.Status. It will invoke update validation with the provided existing +// and updated objects. +// It sets zero values only if the object does not have a zero value for the respective field. 
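A minimal, permissive RESTUpdateStrategy sketch, assuming the embedded typer is something like a *runtime.Scheme and that the resource needs no normalization or validation. widgetUpdateStrategy is hypothetical and not part of this patch.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/registry/rest"
)

// widgetUpdateStrategy normalizes nothing, validates nothing,
// and disallows both create-on-update and unconditional updates.
type widgetUpdateStrategy struct {
	runtime.ObjectTyper // usually a *runtime.Scheme
}

var _ rest.RESTUpdateStrategy = widgetUpdateStrategy{}

func (widgetUpdateStrategy) NamespaceScoped() bool          { return true }
func (widgetUpdateStrategy) AllowCreateOnUpdate() bool      { return false }
func (widgetUpdateStrategy) AllowUnconditionalUpdate() bool { return false }
func (widgetUpdateStrategy) Canonicalize(obj runtime.Object) {}

func (widgetUpdateStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {}

func (widgetUpdateStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	return nil
}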
+func BeforeUpdate(strategy RESTUpdateStrategy, ctx context.Context, obj, old runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + if strategy.NamespaceScoped() { + if !ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else if len(objectMeta.GetNamespace()) > 0 { + objectMeta.SetNamespace(metav1.NamespaceNone) + } + + // Ensure requests cannot update generation + oldMeta, err := meta.Accessor(old) + if err != nil { + return err + } + objectMeta.SetGeneration(oldMeta.GetGeneration()) + + // Ensure managedFields state is removed unless ServerSideApply is enabled + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + oldMeta.SetManagedFields(nil) + objectMeta.SetManagedFields(nil) + } + + strategy.PrepareForUpdate(ctx, obj, old) + + // ClusterName is ignored and should not be saved + if len(objectMeta.GetClusterName()) > 0 { + objectMeta.SetClusterName("") + } + // Use the existing UID if none is provided + if len(objectMeta.GetUID()) == 0 { + objectMeta.SetUID(oldMeta.GetUID()) + } + // ignore changes to timestamp + if oldCreationTime := oldMeta.GetCreationTimestamp(); !oldCreationTime.IsZero() { + objectMeta.SetCreationTimestamp(oldMeta.GetCreationTimestamp()) + } + // an update can never remove/change a deletion timestamp + if !oldMeta.GetDeletionTimestamp().IsZero() { + objectMeta.SetDeletionTimestamp(oldMeta.GetDeletionTimestamp()) + } + // an update can never remove/change grace period seconds + if oldMeta.GetDeletionGracePeriodSeconds() != nil && objectMeta.GetDeletionGracePeriodSeconds() == nil { + objectMeta.SetDeletionGracePeriodSeconds(oldMeta.GetDeletionGracePeriodSeconds()) + } + + // Ensure some common fields, like UID, are validated for all resources. + errs, err := validateCommonFields(obj, old, strategy) + if err != nil { + return errors.NewInternalError(err) + } + + errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...) + if len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.GetName(), errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// TransformFunc is a function to transform and return newObj +type TransformFunc func(ctx context.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) + +// defaultUpdatedObjectInfo implements UpdatedObjectInfo +type defaultUpdatedObjectInfo struct { + // obj is the updated object + obj runtime.Object + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. +func DefaultUpdatedObjectInfo(obj runtime.Object, transformers ...TransformFunc) UpdatedObjectInfo { + return &defaultUpdatedObjectInfo{obj, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. 
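The TransformFunc hook shown above is easiest to see with a concrete example. Below is a sketch that stamps a label onto the updated object after any user-supplied changes; the label key is invented for illustration and the helper names are hypothetical.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/rest"
)

// addManagedLabel is a TransformFunc that runs after the caller's update has been applied.
func addManagedLabel(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
	accessor, err := meta.Accessor(newObj)
	if err != nil {
		return nil, err
	}
	labels := accessor.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels["example.io/managed"] = "true"
	accessor.SetLabels(labels)
	return newObj, nil
}

// updatedInfoFor wraps an incoming object so registry code receives the transformed copy.
func updatedInfoFor(obj runtime.Object) rest.UpdatedObjectInfo {
	return rest.DefaultUpdatedObjectInfo(obj, addManagedLabel)
}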
+func (i *defaultUpdatedObjectInfo) Preconditions() *metav1.Preconditions { + // Attempt to get the UID out of the object + accessor, err := meta.Accessor(i.obj) + if err != nil { + // If no UID can be read, no preconditions are possible + return nil + } + + // If empty, no preconditions needed + uid := accessor.GetUID() + if len(uid) == 0 { + return nil + } + + return &metav1.Preconditions{UID: &uid} +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It returns a copy of the held obj, passed through any configured transformers. +func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { + var err error + // Start with the configured object + newObj := i.obj + + // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, + // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. + // If we're re-called, we need to be able to return the pristine version. + if newObj != nil { + newObj = newObj.DeepCopyObject() + } + + // Allow any configured transformers to update the new object + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} + +// wrappedUpdatedObjectInfo allows wrapping an existing objInfo and +// chaining additional transformations/checks on the result of UpdatedObject() +type wrappedUpdatedObjectInfo struct { + // obj is the updated object + objInfo UpdatedObjectInfo + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// WrapUpdatedObjectInfo returns an UpdatedObjectInfo impl that delegates to +// the specified objInfo, then calls the passed transformers +func WrapUpdatedObjectInfo(objInfo UpdatedObjectInfo, transformers ...TransformFunc) UpdatedObjectInfo { + return &wrappedUpdatedObjectInfo{objInfo, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. +func (i *wrappedUpdatedObjectInfo) Preconditions() *metav1.Preconditions { + return i.objInfo.Preconditions() +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It delegates to the wrapped objInfo and passes the result through any configured transformers. 
+func (i *wrappedUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { + newObj, err := i.objInfo.UpdatedObject(ctx, oldObj) + if err != nil { + return newObj, err + } + + // Allow any configured transformers to update the new object or error + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} + +// AdmissionToValidateObjectUpdateFunc converts validating admission to a rest validate object update func +func AdmissionToValidateObjectUpdateFunc(admit admission.Interface, staticAttributes admission.Attributes, o admission.ObjectInterfaces) ValidateObjectUpdateFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(ctx context.Context, obj, old runtime.Object) error { return nil } + } + return func(ctx context.Context, obj, old runtime.Object) error { + finalAttributes := admission.NewAttributesRecord( + obj, + old, + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetOperationOptions(), + staticAttributes.IsDryRun(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(ctx, finalAttributes, o) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go new file mode 100644 index 000000000..9ac857924 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -0,0 +1,854 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "fmt" + "net" + "net/http" + goruntime "runtime" + "runtime/debug" + "sort" + "strconv" + "strings" + "sync/atomic" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/go-openapi/spec" + "github.com/google/uuid" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + auditpolicy "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + authorizerunion "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/filterlatency" + genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" + apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + genericregistry "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/apiserver/pkg/server/egressselector" + genericfilters "k8s.io/apiserver/pkg/server/filters" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/routes" + serverstore "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/storageversion" + "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/informers" + restclient "k8s.io/client-go/rest" + "k8s.io/component-base/logs" + "k8s.io/klog/v2" + openapicommon "k8s.io/kube-openapi/pkg/common" + utilsnet "k8s.io/utils/net" + + // install apis + _ "k8s.io/apiserver/pkg/apis/apiserver/install" +) + +const ( + // DefaultLegacyAPIPrefix is where the legacy APIs will be located. + DefaultLegacyAPIPrefix = "/api" + + // APIGroupPrefix is where non-legacy API group will be located. + APIGroupPrefix = "/apis" +) + +// Config is a structure used to configure a GenericAPIServer. +// Its members are sorted roughly in order of importance for composers. +type Config struct { + // SecureServing is required to serve https + SecureServing *SecureServingInfo + + // Authentication is the configuration for authentication + Authentication AuthenticationInfo + + // Authorization is the configuration for authorization + Authorization AuthorizationInfo + + // LoopbackClientConfig is a config for a privileged loopback connection to the API server + // This is required for proper functioning of the PostStartHooks on a GenericAPIServer + // TODO: move into SecureServing(WithLoopback) as soon as insecure serving is gone + LoopbackClientConfig *restclient.Config + + // EgressSelector provides a lookup mechanism for dialing outbound connections. + // It does so based on a EgressSelectorConfiguration which was read at startup. 
+ EgressSelector *egressselector.EgressSelector + + // RuleResolver is required to get the list of rules that apply to a given user + // in a given namespace + RuleResolver authorizer.RuleResolver + // AdmissionControl performs deep inspection of a given request (including content) + // to set values and determine whether its allowed + AdmissionControl admission.Interface + CorsAllowedOriginList []string + + // FlowControl, if not nil, gives priority and fairness to request handling + FlowControl utilflowcontrol.Interface + + EnableIndex bool + EnableProfiling bool + EnableDiscovery bool + // Requires generic profiling enabled + EnableContentionProfiling bool + EnableMetrics bool + + DisabledPostStartHooks sets.String + // done values in this values for this map are ignored. + PostStartHooks map[string]PostStartHookConfigEntry + + // Version will enable the /version endpoint if non-nil + Version *version.Info + // AuditBackend is where audit events are sent to. + AuditBackend audit.Backend + // AuditPolicyChecker makes the decision of whether and how to audit log a request. + AuditPolicyChecker auditpolicy.Checker + // ExternalAddress is the host name to use for external (public internet) facing URLs (e.g. Swagger) + // Will default to a value based on secure serving info and available ipv4 IPs. + ExternalAddress string + + //=========================================================================== + // Fields you probably don't care about changing + //=========================================================================== + + // BuildHandlerChainFunc allows you to build custom handler chains by decorating the apiHandler. + BuildHandlerChainFunc func(apiHandler http.Handler, c *Config) (secure http.Handler) + // HandlerChainWaitGroup allows you to wait for all chain handlers exit after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + // DiscoveryAddresses is used to build the IPs pass to discovery. If nil, the ExternalAddress is + // always reported + DiscoveryAddresses discovery.Addresses + // The default set of healthz checks. There might be more added via AddHealthChecks dynamically. + HealthzChecks []healthz.HealthChecker + // The default set of livez checks. There might be more added via AddHealthChecks dynamically. + LivezChecks []healthz.HealthChecker + // The default set of readyz-only checks. There might be more added via AddReadyzChecks dynamically. + ReadyzChecks []healthz.HealthChecker + // LegacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests + // to InstallLegacyAPIGroup. New API servers don't generally have legacy groups at all. + LegacyAPIGroupPrefixes sets.String + // RequestInfoResolver is used to assign attributes (used by admission and authorization) based on a request URL. + // Use-cases that are like kubelets may need to customize this. + RequestInfoResolver apirequest.RequestInfoResolver + // Serializer is required and provides the interface for serializing and converting objects to and from the wire + // The default (api.Codecs) usually works fine. + Serializer runtime.NegotiatedSerializer + // OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. Use DefaultOpenAPIConfig for "working" defaults. + OpenAPIConfig *openapicommon.Config + + // RESTOptionsGetter is used to construct RESTStorage types via the generic registry. 
+ RESTOptionsGetter genericregistry.RESTOptionsGetter + + // If specified, all requests except those which match the LongRunningFunc predicate will timeout + // after this duration. + RequestTimeout time.Duration + // If specified, long running requests such as watch will be allocated a random timeout between this value, and + // twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds. + MinRequestTimeout int + + // This represents the maximum amount of time it should take for apiserver to complete its startup + // sequence and become healthy. From apiserver's start time to when this amount of time has + // elapsed, /livez will assume that unfinished post-start hooks will complete successfully and + // therefore return true. + LivezGracePeriod time.Duration + // ShutdownDelayDuration allows to block shutdown for some time, e.g. until endpoints pointing to this API server + // have converged on all node. During this time, the API server keeps serving, /healthz will return 200, + // but /readyz will return failure. + ShutdownDelayDuration time.Duration + + // The limit on the total size increase all "copy" operations in a json + // patch may cause. + // This affects all places that applies json patch in the binary. + JSONPatchMaxCopyBytes int64 + // The limit on the request size that would be accepted and decoded in a write request + // 0 means no limit. + MaxRequestBodyBytes int64 + // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further + // request has to wait. Applies only to non-mutating requests. + MaxRequestsInFlight int + // MaxMutatingRequestsInFlight is the maximum number of parallel mutating requests. Every further + // request has to wait. + MaxMutatingRequestsInFlight int + // Predicate which is true for paths of long-running http requests + LongRunningFunc apirequest.LongRunningRequestCheck + + // GoawayChance is the probability that send a GOAWAY to HTTP/2 clients. When client received + // GOAWAY, the in-flight requests will not be affected and new requests will use + // a new TCP connection to triggering re-balancing to another server behind the load balance. + // Default to 0, means never send GOAWAY. Max is 0.02 to prevent break the apiserver. + GoawayChance float64 + + // MergedResourceConfig indicates which groupVersion enabled and its resources enabled/disabled. + // This is composed of genericapiserver defaultAPIResourceConfig and those parsed from flags. + // If not specify any in flags, then genericapiserver will only enable defaultAPIResourceConfig. + MergedResourceConfig *serverstore.ResourceConfig + + //=========================================================================== + // values below here are targets for removal + //=========================================================================== + + // PublicAddress is the IP address where members of the cluster (kubelet, + // kube-proxy, services, etc.) can reach the GenericAPIServer. + // If nil or 0.0.0.0, the host's default interface will be used. + PublicAddress net.IP + + // EquivalentResourceRegistry provides information about resources equivalent to a given resource, + // and the kind associated with a given resource. As resources are installed, they are registered here. + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // APIServerID is the ID of this API server + APIServerID string + + // StorageVersionManager holds the storage versions of the API resources installed by this server. 
+ StorageVersionManager storageversion.Manager +} + +type RecommendedConfig struct { + Config + + // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by + // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config + // by default, or the kubeconfig given with kubeconfig command line flag. + SharedInformerFactory informers.SharedInformerFactory + + // ClientConfig holds the kubernetes client configuration. + // This value is set by RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. + // By default in-cluster client config is used. + ClientConfig *restclient.Config +} + +type SecureServingInfo struct { + // Listener is the secure server network listener. + Listener net.Listener + + // Cert is the main server cert which is used if SNI does not match. Cert must be non-nil and is + // allowed to be in SNICerts. + Cert dynamiccertificates.CertKeyContentProvider + + // SNICerts are the TLS certificates used for SNI. + SNICerts []dynamiccertificates.SNICertKeyContentProvider + + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA dynamiccertificates.CAContentProvider + + // MinTLSVersion optionally overrides the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + MinTLSVersion uint16 + + // CipherSuites optionally overrides the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + CipherSuites []uint16 + + // HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client. + // A value of zero means to use the default provided by golang's HTTP/2 support. + HTTP2MaxStreamsPerConnection int + + // DisableHTTP2 indicates that http2 should not be enabled. + DisableHTTP2 bool +} + +type AuthenticationInfo struct { + // APIAudiences is a list of identifier that the API identifies as. This is + // used by some authenticators to validate audience bound credentials. 
+ APIAudiences authenticator.Audiences + // Authenticator determines which subject is making the request + Authenticator authenticator.Request +} + +type AuthorizationInfo struct { + // Authorizer determines whether the subject is allowed to make the request based only + // on the RequestURI + Authorizer authorizer.Authorizer +} + +// NewConfig returns a Config struct with the default values +func NewConfig(codecs serializer.CodecFactory) *Config { + defaultHealthChecks := []healthz.HealthChecker{healthz.PingHealthz, healthz.LogHealthz} + var id string + if feature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { + id = "kube-apiserver-" + uuid.New().String() + } + return &Config{ + Serializer: codecs, + BuildHandlerChainFunc: DefaultBuildHandlerChain, + HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), + LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), + DisabledPostStartHooks: sets.NewString(), + PostStartHooks: map[string]PostStartHookConfigEntry{}, + HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...), + EnableIndex: true, + EnableDiscovery: true, + EnableProfiling: true, + EnableMetrics: true, + MaxRequestsInFlight: 400, + MaxMutatingRequestsInFlight: 200, + RequestTimeout: time.Duration(60) * time.Second, + MinRequestTimeout: 1800, + LivezGracePeriod: time.Duration(0), + ShutdownDelayDuration: time.Duration(0), + // 1.5MB is the default client request size in bytes + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd, so we allow 2x as the largest size + // increase the "copy" operations in a json patch may cause. + JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024), + // 1.5MB is the recommended client request size in byte + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd, so we allow 2x as the largest request + // body size to be accepted and decoded in a write request. 
+ MaxRequestBodyBytes: int64(3 * 1024 * 1024), + + // Default to treating watch as a long-running operation + // Generic API servers have no inherent long-running subresources + LongRunningFunc: genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()), + APIServerID: id, + StorageVersionManager: storageversion.NewDefaultManager(), + } +} + +// NewRecommendedConfig returns a RecommendedConfig struct with the default values +func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { + return &RecommendedConfig{ + Config: *NewConfig(codecs), + } +} + +func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { + return &openapicommon.Config{ + ProtocolList: []string{"https"}, + IgnorePrefixes: []string{}, + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Generic API Server", + }, + }, + DefaultResponse: &spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "Default Response.", + }, + }, + GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags, + GetDefinitionName: defNamer.GetDefinitionName, + GetDefinitions: getDefinitions, + } +} + +func (c *AuthenticationInfo) ApplyClientCert(clientCA dynamiccertificates.CAContentProvider, servingInfo *SecureServingInfo) error { + if servingInfo == nil { + return nil + } + if clientCA == nil { + return nil + } + if servingInfo.ClientCA == nil { + servingInfo.ClientCA = clientCA + return nil + } + + servingInfo.ClientCA = dynamiccertificates.NewUnionCAContentProvider(servingInfo.ClientCA, clientCA) + return nil +} + +type completedConfig struct { + *Config + + //=========================================================================== + // values below here are filled in during completion + //=========================================================================== + + // SharedInformerFactory provides shared informers for resources + SharedInformerFactory informers.SharedInformerFactory +} + +type CompletedConfig struct { + // Embed a private pointer that cannot be instantiated outside of this package. + *completedConfig +} + +// AddHealthChecks adds a health check to our config to be exposed by the health endpoints +// of our configured apiserver. We should prefer this to adding healthChecks directly to +// the config unless we explicitly want to add a healthcheck only to a specific health endpoint. +func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) { + for _, check := range healthChecks { + c.HealthzChecks = append(c.HealthzChecks, check) + c.LivezChecks = append(c.LivezChecks, check) + c.ReadyzChecks = append(c.ReadyzChecks, check) + } +} + +// AddPostStartHook allows you to add a PostStartHook that will later be added to the server itself in a New call. +// Name conflicts will cause an error. 
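As a usage sketch for the hook and health-check helpers defined here: the snippet below wires one checker and one post-start hook into a fresh Config. It assumes healthz.NamedCheck from the same apiserver module; names such as "example-ping" and "example-hook" are placeholders.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/healthz"
)

func exampleConfig() *genericapiserver.Config {
	scheme := runtime.NewScheme()
	cfg := genericapiserver.NewConfig(serializer.NewCodecFactory(scheme))

	// Registers the same checker on /healthz, /livez and /readyz.
	cfg.AddHealthChecks(healthz.NamedCheck("example-ping", func(_ *http.Request) error { return nil }))

	// The hook name must be unique; a duplicate registration returns an error.
	_ = cfg.AddPostStartHook("example-hook", func(ctx genericapiserver.PostStartHookContext) error {
		// Start background work here; ctx.StopCh is closed on shutdown.
		return nil
	})
	return cfg
}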
+func (c *Config) AddPostStartHook(name string, hook PostStartHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return fmt.Errorf("hook func may not be nil: %q", name) + } + if c.DisabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) + return nil + } + + if postStartHook, exists := c.PostStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack) + } + c.PostStartHooks[name] = PostStartHookConfigEntry{hook: hook, originatingStack: string(debug.Stack())} + + return nil +} + +// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure. +func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { + if err := c.AddPostStartHook(name, hook); err != nil { + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) + } +} + +// Complete fills in any fields not set that are required to have valid data and can be derived +// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. +func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { + if len(c.ExternalAddress) == 0 && c.PublicAddress != nil { + c.ExternalAddress = c.PublicAddress.String() + } + + // if there is no port, and we listen on one securely, use that one + if _, _, err := net.SplitHostPort(c.ExternalAddress); err != nil { + if c.SecureServing == nil { + klog.Fatalf("cannot derive external address port without listening on a secure port.") + } + _, port, err := c.SecureServing.HostPort() + if err != nil { + klog.Fatalf("cannot derive external address from the secure port: %v", err) + } + c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port)) + } + + if c.OpenAPIConfig != nil { + if c.OpenAPIConfig.SecurityDefinitions != nil { + // Setup OpenAPI security: all APIs will have the same authentication for now. 
+ c.OpenAPIConfig.DefaultSecurity = []map[string][]string{} + keys := []string{} + for k := range *c.OpenAPIConfig.SecurityDefinitions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + c.OpenAPIConfig.DefaultSecurity = append(c.OpenAPIConfig.DefaultSecurity, map[string][]string{k: {}}) + } + if c.OpenAPIConfig.CommonResponses == nil { + c.OpenAPIConfig.CommonResponses = map[int]spec.Response{} + } + if _, exists := c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized]; !exists { + c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized] = spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "Unauthorized", + }, + } + } + } + + // make sure we populate info, and info.version, if not manually set + if c.OpenAPIConfig.Info == nil { + c.OpenAPIConfig.Info = &spec.Info{} + } + if c.OpenAPIConfig.Info.Version == "" { + if c.Version != nil { + c.OpenAPIConfig.Info.Version = strings.Split(c.Version.String(), "-")[0] + } else { + c.OpenAPIConfig.Info.Version = "unversioned" + } + } + } + if c.DiscoveryAddresses == nil { + c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} + } + + AuthorizeClientBearerToken(c.LoopbackClientConfig, &c.Authentication, &c.Authorization) + + if c.RequestInfoResolver == nil { + c.RequestInfoResolver = NewRequestInfoResolver(c) + } + + if c.EquivalentResourceRegistry == nil { + if c.RESTOptionsGetter == nil { + c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistry() + } else { + c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistryWithIdentity(func(groupResource schema.GroupResource) string { + // use the storage prefix as the key if possible + if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource); err == nil { + return opts.ResourcePrefix + } + // otherwise return "" to use the default key (parent GV name) + return "" + }) + } + } + + return CompletedConfig{&completedConfig{c, informers}} +} + +// Complete fills in any fields not set that are required to have valid data and can be derived +// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. +func (c *RecommendedConfig) Complete() CompletedConfig { + return c.Config.Complete(c.SharedInformerFactory) +} + +// New creates a new server which logically combines the handling chain with the passed server. +// name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delgating. +// delegationTarget may not be nil. 
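Composers who need extra filters usually override BuildHandlerChainFunc before Complete/New rather than patching the default chain. A sketch that wraps DefaultBuildHandlerChain with one additional outermost filter; the request-logging filter itself is purely illustrative.

package example

import (
	"log"
	"net/http"

	genericapiserver "k8s.io/apiserver/pkg/server"
)

// withRequestLogging installs a handler chain builder that delegates to the
// default chain and adds a trivial logging filter on the outside.
func withRequestLogging(cfg *genericapiserver.Config) {
	cfg.BuildHandlerChainFunc = func(apiHandler http.Handler, c *genericapiserver.Config) http.Handler {
		inner := genericapiserver.DefaultBuildHandlerChain(apiHandler, c)
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Printf("%s %s", r.Method, r.URL.Path)
			inner.ServeHTTP(w, r)
		})
	}
}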
+func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*GenericAPIServer, error) { + if c.Serializer == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") + } + if c.LoopbackClientConfig == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil") + } + if c.EquivalentResourceRegistry == nil { + return nil, fmt.Errorf("Genericapiserver.New() called with config.EquivalentResourceRegistry == nil") + } + + handlerChainBuilder := func(handler http.Handler) http.Handler { + return c.BuildHandlerChainFunc(handler, c.Config) + } + apiServerHandler := NewAPIServerHandler(name, c.Serializer, handlerChainBuilder, delegationTarget.UnprotectedHandler()) + + s := &GenericAPIServer{ + discoveryAddresses: c.DiscoveryAddresses, + LoopbackClientConfig: c.LoopbackClientConfig, + legacyAPIGroupPrefixes: c.LegacyAPIGroupPrefixes, + admissionControl: c.AdmissionControl, + Serializer: c.Serializer, + AuditBackend: c.AuditBackend, + Authorizer: c.Authorization.Authorizer, + delegationTarget: delegationTarget, + EquivalentResourceRegistry: c.EquivalentResourceRegistry, + HandlerChainWaitGroup: c.HandlerChainWaitGroup, + + minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second, + ShutdownTimeout: c.RequestTimeout, + ShutdownDelayDuration: c.ShutdownDelayDuration, + SecureServingInfo: c.SecureServing, + ExternalAddress: c.ExternalAddress, + + Handler: apiServerHandler, + + listedPathProvider: apiServerHandler, + + openAPIConfig: c.OpenAPIConfig, + + postStartHooks: map[string]postStartHookEntry{}, + preShutdownHooks: map[string]preShutdownHookEntry{}, + disabledPostStartHooks: c.DisabledPostStartHooks, + + healthzChecks: c.HealthzChecks, + livezChecks: c.LivezChecks, + readyzChecks: c.ReadyzChecks, + readinessStopCh: make(chan struct{}), + livezGracePeriod: c.LivezGracePeriod, + + DiscoveryGroupManager: discovery.NewRootAPIsHandler(c.DiscoveryAddresses, c.Serializer), + + maxRequestBodyBytes: c.MaxRequestBodyBytes, + livezClock: clock.RealClock{}, + + APIServerID: c.APIServerID, + StorageVersionManager: c.StorageVersionManager, + } + + for { + if c.JSONPatchMaxCopyBytes <= 0 { + break + } + existing := atomic.LoadInt64(&jsonpatch.AccumulatedCopySizeLimit) + if existing > 0 && existing < c.JSONPatchMaxCopyBytes { + break + } + if atomic.CompareAndSwapInt64(&jsonpatch.AccumulatedCopySizeLimit, existing, c.JSONPatchMaxCopyBytes) { + break + } + } + + // first add poststarthooks from delegated targets + for k, v := range delegationTarget.PostStartHooks() { + s.postStartHooks[k] = v + } + + for k, v := range delegationTarget.PreShutdownHooks() { + s.preShutdownHooks[k] = v + } + + // add poststarthooks that were preconfigured. Using the add method will give us an error if the same name has already been registered. + for name, preconfiguredPostStartHook := range c.PostStartHooks { + if err := s.AddPostStartHook(name, preconfiguredPostStartHook.hook); err != nil { + return nil, err + } + } + + genericApiServerHookName := "generic-apiserver-start-informers" + if c.SharedInformerFactory != nil { + if !s.isPostStartHookRegistered(genericApiServerHookName) { + err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error { + c.SharedInformerFactory.Start(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + // TODO: Once we get rid of /healthz consider changing this to post-start-hook. 
+ err := s.addReadyzChecks(healthz.NewInformerSyncHealthz(c.SharedInformerFactory)) + if err != nil { + return nil, err + } + } + + const priorityAndFairnessConfigConsumerHookName = "priority-and-fairness-config-consumer" + if s.isPostStartHookRegistered(priorityAndFairnessConfigConsumerHookName) { + } else if c.FlowControl != nil { + err := s.AddPostStartHook(priorityAndFairnessConfigConsumerHookName, func(context PostStartHookContext) error { + go c.FlowControl.MaintainObservations(context.StopCh) + go c.FlowControl.Run(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + // TODO(yue9944882): plumb pre-shutdown-hook for request-management system? + } else { + klog.V(3).Infof("Not requested to run hook %s", priorityAndFairnessConfigConsumerHookName) + } + + // Add PostStartHooks for maintaining the watermarks for the Priority-and-Fairness and the Max-in-Flight filters. + if c.FlowControl != nil { + const priorityAndFairnessFilterHookName = "priority-and-fairness-filter" + if !s.isPostStartHookRegistered(priorityAndFairnessFilterHookName) { + err := s.AddPostStartHook(priorityAndFairnessFilterHookName, func(context PostStartHookContext) error { + genericfilters.StartPriorityAndFairnessWatermarkMaintenance(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + } else { + const maxInFlightFilterHookName = "max-in-flight-filter" + if !s.isPostStartHookRegistered(maxInFlightFilterHookName) { + err := s.AddPostStartHook(maxInFlightFilterHookName, func(context PostStartHookContext) error { + genericfilters.StartMaxInFlightWatermarkMaintenance(context.StopCh) + return nil + }) + if err != nil { + return nil, err + } + } + } + + for _, delegateCheck := range delegationTarget.HealthzChecks() { + skip := false + for _, existingCheck := range c.HealthzChecks { + if existingCheck.Name() == delegateCheck.Name() { + skip = true + break + } + } + if skip { + continue + } + s.AddHealthChecks(delegateCheck) + } + + s.listedPathProvider = routes.ListedPathProviders{s.listedPathProvider, delegationTarget} + + installAPI(s, c.Config) + + // use the UnprotectedHandler from the delegation target to ensure that we don't attempt to double authenticator, authorize, + // or some other part of the filter chain in delegation cases. 
+ if delegationTarget.UnprotectedHandler() == nil && c.EnableIndex { + s.Handler.NonGoRestfulMux.NotFoundHandler(routes.IndexLister{ + StatusCode: http.StatusNotFound, + PathProvider: s.listedPathProvider, + }) + } + + return s, nil +} + +func BuildHandlerChainWithStorageVersionPrecondition(apiHandler http.Handler, c *Config) http.Handler { + // WithStorageVersionPrecondition needs the WithRequestInfo to run first + handler := genericapifilters.WithStorageVersionPrecondition(apiHandler, c.StorageVersionManager, c.Serializer) + return DefaultBuildHandlerChain(handler, c) +} + +func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { + handler := filterlatency.TrackCompleted(apiHandler) + handler = genericapifilters.WithAuthorization(handler, c.Authorization.Authorizer, c.Serializer) + handler = filterlatency.TrackStarted(handler, "authorization") + + if c.FlowControl != nil { + handler = filterlatency.TrackCompleted(handler) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl) + handler = filterlatency.TrackStarted(handler, "priorityandfairness") + } else { + handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) + } + + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithImpersonation(handler, c.Authorization.Authorizer, c.Serializer) + handler = filterlatency.TrackStarted(handler, "impersonation") + + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) + handler = filterlatency.TrackStarted(handler, "audit") + + failedHandler := genericapifilters.Unauthorized(c.Serializer) + failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyChecker) + + failedHandler = filterlatency.TrackCompleted(failedHandler) + handler = filterlatency.TrackCompleted(handler) + handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler, c.Authentication.APIAudiences) + handler = filterlatency.TrackStarted(handler, "authentication") + + handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout) + handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) + handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) + if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 { + handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance) + } + handler = genericapifilters.WithAuditAnnotations(handler, c.AuditBackend, c.AuditPolicyChecker) + handler = genericapifilters.WithWarningRecorder(handler) + handler = genericapifilters.WithCacheControl(handler) + handler = genericapifilters.WithRequestReceivedTimestamp(handler) + handler = genericfilters.WithPanicRecovery(handler, c.RequestInfoResolver) + return handler +} + +func installAPI(s *GenericAPIServer, c *Config) { + if c.EnableIndex { + routes.Index{}.Install(s.listedPathProvider, s.Handler.NonGoRestfulMux) + } + if c.EnableProfiling { + routes.Profiling{}.Install(s.Handler.NonGoRestfulMux) + if c.EnableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + // so far, only logging related endpoints are considered valid to add for these debug flags. 
+ routes.DebugFlags{}.Install(s.Handler.NonGoRestfulMux, "v", routes.StringFlagPutHandler(logs.GlogSetter)) + } + if c.EnableMetrics { + if c.EnableProfiling { + routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) + } else { + routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) + } + } + + routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer) + + if c.EnableDiscovery { + s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService()) + } + if c.FlowControl != nil && feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + c.FlowControl.Install(s.Handler.NonGoRestfulMux) + } +} + +func NewRequestInfoResolver(c *Config) *apirequest.RequestInfoFactory { + apiPrefixes := sets.NewString(strings.Trim(APIGroupPrefix, "/")) // all possible API prefixes + legacyAPIPrefixes := sets.String{} // APIPrefixes that won't have groups (legacy) + for legacyAPIPrefix := range c.LegacyAPIGroupPrefixes { + apiPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/")) + legacyAPIPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/")) + } + + return &apirequest.RequestInfoFactory{ + APIPrefixes: apiPrefixes, + GrouplessAPIPrefixes: legacyAPIPrefixes, + } +} + +func (s *SecureServingInfo) HostPort() (string, int, error) { + if s == nil || s.Listener == nil { + return "", 0, fmt.Errorf("no listener found") + } + addr := s.Listener.Addr().String() + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return "", 0, fmt.Errorf("failed to get port from listener address %q: %v", addr, err) + } + port, err := utilsnet.ParsePort(portStr, true) + if err != nil { + return "", 0, fmt.Errorf("invalid non-numeric port %q", portStr) + } + return host, port, nil +} + +// AuthorizeClientBearerToken wraps the authenticator and authorizer in loopback authentication logic +// if the loopback client config is specified AND it has a bearer token. Note that if either authn or +// authz is nil, this function won't add a token authenticator or authorizer. +func AuthorizeClientBearerToken(loopback *restclient.Config, authn *AuthenticationInfo, authz *AuthorizationInfo) { + if loopback == nil || len(loopback.BearerToken) == 0 { + return + } + if authn == nil || authz == nil { + // prevent nil pointer panic + return + } + if authn.Authenticator == nil || authz.Authorizer == nil { + // authenticator or authorizer might be nil if we want to bypass authz/authn + // and we also do nothing in this case. + return + } + + privilegedLoopbackToken := loopback.BearerToken + var uid = uuid.New().String() + tokens := make(map[string]*user.DefaultInfo) + tokens[privilegedLoopbackToken] = &user.DefaultInfo{ + Name: user.APIServerUser, + UID: uid, + Groups: []string{user.SystemPrivilegedGroup}, + } + + tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) + authn.Authenticator = authenticatorunion.New(tokenAuthenticator, authn.Authenticator) + + tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) + authz.Authorizer = authorizerunion.New(tokenAuthorizer, authz.Authorizer) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go new file mode 100644 index 000000000..f2c2de9b3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net" + + restclient "k8s.io/client-go/rest" + netutils "k8s.io/utils/net" +) + +// LoopbackClientServerNameOverride is passed to the apiserver from the loopback client in order to +// select the loopback certificate via SNI if TLS is used. +const LoopbackClientServerNameOverride = "apiserver-loopback-client" + +func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, error) { + if s == nil || (s.Cert == nil && len(s.SNICerts) == 0) { + return nil, nil + } + + host, port, err := LoopbackHostPort(s.Listener.Addr().String()) + if err != nil { + return nil, err + } + + return &restclient.Config{ + // Do not limit loopback client QPS. + QPS: -1, + Host: "https://" + net.JoinHostPort(host, port), + // override the ServerName to select our loopback certificate via SNI. This name is also + // used by the client to compare the returns server certificate against. + TLSClientConfig: restclient.TLSClientConfig{ + CAData: caCert, + }, + }, nil +} + +func (s *SecureServingInfo) NewLoopbackClientConfig(token string, loopbackCert []byte) (*restclient.Config, error) { + c, err := s.NewClientConfig(loopbackCert) + if err != nil || c == nil { + return c, err + } + + c.BearerToken = token + c.TLSClientConfig.ServerName = LoopbackClientServerNameOverride + + return c, nil +} + +// LoopbackHostPort returns the host and port loopback REST clients should use +// to contact the server. +func LoopbackHostPort(bindAddress string) (string, string, error) { + host, port, err := net.SplitHostPort(bindAddress) + if err != nil { + // should never happen + return "", "", fmt.Errorf("invalid server bind address: %q", bindAddress) + } + + isIPv6 := netutils.IsIPv6String(host) + + // Value is expected to be an IP or DNS name, not "0.0.0.0". + if host == "0.0.0.0" || host == "::" { + // Get ip of local interface, but fall back to "localhost". + // Note that "localhost" is resolved with the external nameserver first with Go's stdlib. + // So if localhost. resolves, we don't get a 127.0.0.1 as expected. + host = getLoopbackAddress(isIPv6) + } + return host, port, nil +} + +// getLoopbackAddress returns the ip address of local loopback interface. If any error occurs or loopback interface is not found, will fall back to "localhost" +func getLoopbackAddress(wantIPv6 bool) string { + addrs, err := net.InterfaceAddrs() + if err == nil { + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && wantIPv6 == netutils.IsIPv6(ipnet.IP) { + return ipnet.IP.String() + } + } + } + return "localhost" +} diff --git a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go new file mode 100644 index 000000000..655543a25 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/client-go/rest"
+)
+
+// DeprecatedInsecureServingInfo is the main context object for the insecure http server.
+// HTTP does NOT include authentication or authorization.
+// You shouldn't be using this. It makes sig-auth sad.
+type DeprecatedInsecureServingInfo struct {
+	// Listener is the insecure server network listener.
+	Listener net.Listener
+	// optional server name for log messages
+	Name string
+}
+
+// Serve starts an insecure http server with the given handler. It fails only if
+// the initial listen call fails. It does not block.
+func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {
+	insecureServer := &http.Server{
+		Addr:           s.Listener.Addr().String(),
+		Handler:        handler,
+		MaxHeaderBytes: 1 << 20,
+	}
+
+	if len(s.Name) > 0 {
+		klog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr())
+	} else {
+		klog.Infof("Serving insecurely on %s", s.Listener.Addr())
+	}
+	_, err := RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh)
+	// NOTE: we do not handle stoppedCh returned by RunServer for graceful termination here
+	return err
+}
+
+func (s *DeprecatedInsecureServingInfo) NewLoopbackClientConfig() (*rest.Config, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	host, port, err := LoopbackHostPort(s.Listener.Addr().String())
+	if err != nil {
+		return nil, err
+	}
+
+	return &rest.Config{
+		Host: "http://" + net.JoinHostPort(host, port),
+		// Increase QPS limits. The client is currently passed to all admission plugins,
+		// and those can be throttled in case of higher load on apiserver - see #22340 and #22422
+		// for more details. Once #22422 is fixed, we may want to remove it.
+		QPS:   50,
+		Burst: 100,
+	}, nil
+}
+
+// InsecureSuperuser implements authenticator.Request to always return a superuser.
+// This is functionally equivalent to skipping authentication and authorization,
+// but allows apiserver code to stop special-casing a nil user to skip authorization checks.
+type InsecureSuperuser struct{}
+
+func (InsecureSuperuser) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
+	auds, _ := authenticator.AudiencesFrom(req.Context())
+	return &authenticator.Response{
+		User: &user.DefaultInfo{
+			Name:   "system:unsecured",
+			Groups: []string{user.SystemPrivilegedGroup, user.AllAuthenticated},
+		},
+		Audiences: auds,
+	}, true, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/doc.go b/vendor/k8s.io/apiserver/pkg/server/doc.go
new file mode 100644
index 000000000..bc671eae8
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package server contains the plumbing to create kubernetes-like API server command. +package server // import "k8s.io/apiserver/pkg/server" diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go new file mode 100644 index 000000000..114002b1a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" +) + +// CertKeyContentProvider provides a certificate and matching private key +type CertKeyContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCertKeyContent provides cert and key byte content + CurrentCertKeyContent() ([]byte, []byte) +} + +// SNICertKeyContentProvider provides a certificate and matching private key as well as optional explicit names +type SNICertKeyContentProvider interface { + CertKeyContentProvider + // SNINames provides names used for SNI. May return nil. + SNINames() []string +} + +// certKeyContent holds the content for the cert and key +type certKeyContent struct { + cert []byte + key []byte +} + +func (c *certKeyContent) Equal(rhs *certKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.key, rhs.key) && bytes.Equal(c.cert, rhs.cert) +} + +// sniCertKeyContent holds the content for the cert and key as well as any explicit names +type sniCertKeyContent struct { + certKeyContent + sniNames []string +} + +func (c *sniCertKeyContent) Equal(rhs *sniCertKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if len(c.sniNames) != len(rhs.sniNames) { + return false + } + + for i := range c.sniNames { + if c.sniNames[i] != rhs.sniNames[i] { + return false + } + } + + return c.certKeyContent.Equal(&rhs.certKeyContent) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go new file mode 100644 index 000000000..4348fa387 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" +) + +// CAContentProvider provides ca bundle byte content +type CAContentProvider interface { + // Name is just an identifier + Name() string + // CurrentCABundleContent provides ca bundle byte content. Errors can be contained to the controllers initializing + // the value. By the time you get here, you should always be returning a value that won't fail. + CurrentCABundleContent() []byte + // VerifyOptions provides VerifyOptions for authenticators + VerifyOptions() (x509.VerifyOptions, bool) +} + +// dynamicCertificateContent holds the content that overrides the baseTLSConfig +type dynamicCertificateContent struct { + // clientCA holds the content for the clientCA bundle + clientCA caBundleContent + servingCert certKeyContent + sniCerts []sniCertKeyContent +} + +// caBundleContent holds the content for the clientCA bundle. Wrapping the bytes makes the Equals work nicely with the +// method receiver. +type caBundleContent struct { + caBundle []byte +} + +func (c *dynamicCertificateContent) Equal(rhs *dynamicCertificateContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if !c.clientCA.Equal(&rhs.clientCA) { + return false + } + + if !c.servingCert.Equal(&rhs.servingCert) { + return false + } + + if len(c.sniCerts) != len(rhs.sniCerts) { + return false + } + + for i := range c.sniCerts { + if !c.sniCerts[i].Equal(&rhs.sniCerts[i]) { + return false + } + } + + return true +} + +func (c *caBundleContent) Equal(rhs *caBundleContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.caBundle, rhs.caBundle) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go new file mode 100644 index 000000000..ec0fc5096 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go @@ -0,0 +1,274 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "fmt" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// ConfigMapCAController provies a CAContentProvider that can dynamically react to configmap changes +// It also fulfills the authenticator interface to provide verifyoptions +type ConfigMapCAController struct { + name string + + configmapLister corev1listers.ConfigMapLister + configmapNamespace string + configmapName string + configmapKey string + // configMapInformer is tracked so that we can start these on Run + configMapInformer cache.SharedIndexInformer + + // caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file + caBundle atomic.Value + + listeners []Listener + + queue workqueue.RateLimitingInterface + // preRunCaches are the caches to sync before starting the work of this control loop + preRunCaches []cache.InformerSynced +} + +var _ Notifier = &ConfigMapCAController{} +var _ CAContentProvider = &ConfigMapCAController{} +var _ ControllerRunner = &ConfigMapCAController{} + +// NewDynamicCAFromConfigMapController returns a CAContentProvider based on a configmap that automatically reloads content. +// It is near-realtime via an informer. +func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, kubeClient kubernetes.Interface) (*ConfigMapCAController, error) { + if len(purpose) == 0 { + return nil, fmt.Errorf("missing purpose for ca bundle") + } + if len(namespace) == 0 { + return nil, fmt.Errorf("missing namespace for ca bundle") + } + if len(name) == 0 { + return nil, fmt.Errorf("missing name for ca bundle") + } + if len(key) == 0 { + return nil, fmt.Errorf("missing key for ca bundle") + } + caContentName := fmt.Sprintf("%s::%s::%s::%s", purpose, namespace, name, key) + + // we construct our own informer because we need such a small subset of the information available. Just one namespace. 
+ uncastConfigmapInformer := corev1informers.NewFilteredConfigMapInformer(kubeClient, namespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *v1.ListOptions) { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", name).String() + }) + + configmapLister := corev1listers.NewConfigMapLister(uncastConfigmapInformer.GetIndexer()) + + c := &ConfigMapCAController{ + name: caContentName, + configmapNamespace: namespace, + configmapName: name, + configmapKey: key, + configmapLister: configmapLister, + configMapInformer: uncastConfigmapInformer, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)), + preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, + } + + uncastConfigmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + if cast, ok := obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + } + return true // always return true just in case. The checks are fairly cheap + }, + Handler: cache.ResourceEventHandlerFuncs{ + // we have a filter, so any time we're called, we may as well queue. We only ever check one configmap + // so we don't have to be choosy about our key. + AddFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + c.queue.Add(c.keyFn()) + }, + DeleteFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + }, + }) + + return c, nil +} + +func (c *ConfigMapCAController) keyFn() string { + // this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key + return c.configmapNamespace + "/" + c.configmapName +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c *ConfigMapCAController) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *ConfigMapCAController) loadCABundle() error { + configMap, err := c.configmapLister.ConfigMaps(c.configmapNamespace).Get(c.configmapName) + if err != nil { + return err + } + caBundle := configMap.Data[c.configmapKey] + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. + if !c.hasCAChanged([]byte(caBundle)) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), []byte(caBundle)) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *ConfigMapCAController) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. 
+	existing, ok := uncastExisting.(*caBundleAndVerifier)
+	if !ok {
+		return true
+	}
+	if !bytes.Equal(existing.caBundle, caBundle) {
+		return true
+	}
+
+	return false
+}
+
+// RunOnce runs a single sync loop
+func (c *ConfigMapCAController) RunOnce() error {
+	// Ignore the error when running once: with a dynamically loaded CA bundle, it is better to serve with no CA
+	// for a brief time than to crash completely. If crashing is required, higher order logic such as a healthcheck should cause the failure.
+	_ = c.loadCABundle()
+	return nil
+}
+
+// Run starts the controller and blocks until stopCh is closed.
+func (c *ConfigMapCAController) Run(workers int, stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting %s", c.name)
+	defer klog.Infof("Shutting down %s", c.name)
+
+	// we have a personal informer that is narrowly scoped, start it.
+	go c.configMapInformer.Run(stopCh)
+
+	// wait for your secondary caches to fill before starting your work
+	if !cache.WaitForNamedCacheSync(c.name, stopCh, c.preRunCaches...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.Until(c.runWorker, time.Second, stopCh)
+
+	// start timer that rechecks every minute, just in case.  this also serves to prime the controller quickly.
+	go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) {
+		c.queue.Add(workItemKey)
+		return false, nil
+	}, stopCh)
+
+	<-stopCh
+}
+
+func (c *ConfigMapCAController) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *ConfigMapCAController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.loadCABundle()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// Name is just an identifier
+func (c *ConfigMapCAController) Name() string {
+	return c.name
+}
+
+// CurrentCABundleContent provides ca bundle byte content
+func (c *ConfigMapCAController) CurrentCABundleContent() []byte {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		return nil // this can happen if we've been unable to load data from the apiserver for some reason
+	}
+
+	return c.caBundle.Load().(*caBundleAndVerifier).caBundle
+}
+
+// VerifyOptions provides verifyoptions compatible with authenticators
+func (c *ConfigMapCAController) VerifyOptions() (x509.VerifyOptions, bool) {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		// This can happen if we've been unable to load data from the apiserver for some reason.
+		// In this case, we should not accept any connections on the basis of this ca bundle.
+		return x509.VerifyOptions{}, false
+	}
+
+	return uncastObj.(*caBundleAndVerifier).verifyOptions, true
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go
new file mode 100644
index 000000000..756289a80
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+	"bytes"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"sync/atomic"
+	"time"
+
+	"k8s.io/client-go/util/cert"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+// FileRefreshDuration is exposed so that integration tests can crank up the reload speed.
+var FileRefreshDuration = 1 * time.Minute
+
+// Listener is an interface to use to notify interested parties of a change.
+type Listener interface {
+	// Enqueue should be called when an input may have changed
+	Enqueue()
+}
+
+// Notifier is a way to add listeners
+type Notifier interface {
+	// AddListener adds a listener to be notified of potential input changes
+	AddListener(listener Listener)
+}
+
+// ControllerRunner is a generic interface for starting a controller
+type ControllerRunner interface {
+	// RunOnce runs the sync loop a single time. This is useful for synchronous priming
+	RunOnce() error
+
+	// Run should be called in a goroutine, e.g. go Run(workers, stopCh)
+	Run(workers int, stopCh <-chan struct{})
+}
+
+// DynamicFileCAContent provides a CAContentProvider that can dynamically react to new file content
+// It also fulfills the authenticator interface to provide VerifyOptions
+type DynamicFileCAContent struct {
+	name string
+
+	// filename is the name of the file to read.
+	filename string
+
+	// caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file
+	caBundle atomic.Value
+
+	listeners []Listener
+
+	// queue only ever has one item, but it has nice error handling backoff/retry semantics
+	queue workqueue.RateLimitingInterface
+}
+
+var _ Notifier = &DynamicFileCAContent{}
+var _ CAContentProvider = &DynamicFileCAContent{}
+var _ ControllerRunner = &DynamicFileCAContent{}
+
+type caBundleAndVerifier struct {
+	caBundle      []byte
+	verifyOptions x509.VerifyOptions
+}
+
+// NewDynamicCAContentFromFile returns a CAContentProvider based on a filename that automatically reloads content
+func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAContent, error) {
+	if len(filename) == 0 {
+		return nil, fmt.Errorf("missing filename for ca bundle")
+	}
+	name := fmt.Sprintf("%s::%s", purpose, filename)
+
+	ret := &DynamicFileCAContent{
+		name:     name,
+		filename: filename,
+		queue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
+	}
+	if err := ret.loadCABundle(); err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+// AddListener adds a listener to be notified when the CA content changes.
+func (c *DynamicFileCAContent) AddListener(listener Listener) {
+	c.listeners = append(c.listeners, listener)
+}
+
+// loadCABundle determines the next set of content for the file.
+func (c *DynamicFileCAContent) loadCABundle() error {
+	caBundle, err := ioutil.ReadFile(c.filename)
+	if err != nil {
+		return err
+	}
+	if len(caBundle) == 0 {
+		return fmt.Errorf("missing content for CA bundle %q", c.Name())
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
+	if !c.hasCAChanged(caBundle) {
+		return nil
+	}
+
+	caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), caBundle)
+	if err != nil {
+		return err
+	}
+	c.caBundle.Store(caBundleAndVerifier)
+	klog.V(2).Infof("Loaded a new CA Bundle and Verifier for %q", c.Name())
+
+	for _, listener := range c.listeners {
+		listener.Enqueue()
+	}
+
+	return nil
+}
+
+// hasCAChanged returns true if the caBundle is different than the current.
+func (c *DynamicFileCAContent) hasCAChanged(caBundle []byte) bool {
+	uncastExisting := c.caBundle.Load()
+	if uncastExisting == nil {
+		return true
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
+	existing, ok := uncastExisting.(*caBundleAndVerifier)
+	if !ok {
+		return true
+	}
+	if !bytes.Equal(existing.caBundle, caBundle) {
+		return true
+	}
+
+	return false
+}
+
+// RunOnce runs a single sync loop
+func (c *DynamicFileCAContent) RunOnce() error {
+	return c.loadCABundle()
+}
+
+// Run starts the controller and blocks until stopCh is closed.
+func (c *DynamicFileCAContent) Run(workers int, stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting %s", c.name)
+	defer klog.Infof("Shutting down %s", c.name)
+
+	// doesn't matter what workers say, only start one.
+	go wait.Until(c.runWorker, time.Second, stopCh)
+
+	// start timer that rechecks every minute, just in case.  this also serves to prime the controller quickly.
+	go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) {
+		c.queue.Add(workItemKey)
+		return false, nil
+	}, stopCh)
+
+	// TODO this can be wired to an fsnotifier as well.
+
+	<-stopCh
+}
+
+func (c *DynamicFileCAContent) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *DynamicFileCAContent) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.loadCABundle()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// Name is just an identifier
+func (c *DynamicFileCAContent) Name() string {
+	return c.name
+}
+
+// CurrentCABundleContent provides ca bundle byte content
+func (c *DynamicFileCAContent) CurrentCABundleContent() (cabundle []byte) {
+	return c.caBundle.Load().(*caBundleAndVerifier).caBundle
+}
+
+// VerifyOptions provides verifyoptions compatible with authenticators
+func (c *DynamicFileCAContent) VerifyOptions() (x509.VerifyOptions, bool) {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		return x509.VerifyOptions{}, false
+	}
+
+	return uncastObj.(*caBundleAndVerifier).verifyOptions, true
+}
+
+// newCABundleAndVerifier creates a caBundleAndVerifier from the given CA bundle bytes.
+// It returns an error if the bundle is empty or cannot be parsed.
+func newCABundleAndVerifier(name string, caBundle []byte) (*caBundleAndVerifier, error) { + if len(caBundle) == 0 { + return nil, fmt.Errorf("missing content for CA bundle %q", name) + } + + // Wrap with an x509 verifier + var err error + verifyOptions := defaultVerifyOptions() + verifyOptions.Roots, err = cert.NewPoolFromBytes(caBundle) + if err != nil { + return nil, fmt.Errorf("error loading CA bundle for %q: %v", name, err) + } + + return &caBundleAndVerifier{ + caBundle: caBundle, + verifyOptions: verifyOptions, + }, nil +} + +// defaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, +// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth) +func defaultVerifyOptions() x509.VerifyOptions { + return x509.VerifyOptions{ + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go new file mode 100644 index 000000000..3b7f34738 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "sync/atomic" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// DynamicCertKeyPairContent provides a CertKeyContentProvider that can dynamically react to new file content +type DynamicCertKeyPairContent struct { + name string + + // certFile is the name of the certificate file to read. + certFile string + // keyFile is the name of the key file to read. 
+ keyFile string + + // servingCert is a certKeyContent that contains the last read, non-zero length content of the key and cert + certKeyPair atomic.Value + + listeners []Listener + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +var _ Notifier = &DynamicCertKeyPairContent{} +var _ CertKeyContentProvider = &DynamicCertKeyPairContent{} +var _ ControllerRunner = &DynamicCertKeyPairContent{} + +// NewDynamicServingContentFromFiles returns a dynamic CertKeyContentProvider based on a cert and key filename +func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*DynamicCertKeyPairContent, error) { + if len(certFile) == 0 || len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for serving cert") + } + name := fmt.Sprintf("%s::%s::%s", purpose, certFile, keyFile) + + ret := &DynamicCertKeyPairContent{ + name: name, + certFile: certFile, + keyFile: keyFile, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// AddListener adds a listener to be notified when the serving cert content changes. +func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadServingCert determines the next set of content for the file. +func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { + cert, err := ioutil.ReadFile(c.certFile) + if err != nil { + return err + } + key, err := ioutil.ReadFile(c.keyFile) + if err != nil { + return err + } + if len(cert) == 0 || len(key) == 0 { + return fmt.Errorf("missing content for serving cert %q", c.Name()) + } + + // Ensure that the key matches the cert and both are valid + _, err = tls.X509KeyPair(cert, key) + if err != nil { + return err + } + + newCertKey := &certKeyContent{ + cert: cert, + key: key, + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := c.certKeyPair.Load().(*certKeyContent) + if ok && existing != nil && existing.Equal(newCertKey) { + return nil + } + + c.certKeyPair.Store(newCertKey) + klog.V(2).Infof("Loaded a new cert/key pair for %q", c.Name()) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// RunOnce runs a single sync loop +func (c *DynamicCertKeyPairContent) RunOnce() error { + return c.loadCertKeyPair() +} + +// Run starts the controller and blocks until stopCh is closed. +func (c *DynamicCertKeyPairContent) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting %s", c.name) + defer klog.Infof("Shutting down %s", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, stopCh) + + // TODO this can be wired to an fsnotifier as well. 
+ + <-stopCh +} + +func (c *DynamicCertKeyPairContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicCertKeyPairContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCertKeyPair() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicCertKeyPairContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key byte content +func (c *DynamicCertKeyPairContent) CurrentCertKeyContent() ([]byte, []byte) { + certKeyContent := c.certKeyPair.Load().(*certKeyContent) + return certKeyContent.cert, certKeyContent.key +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go new file mode 100644 index 000000000..161fa1ca7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +// DynamicFileSNIContent provides a SNICertKeyContentProvider that can dynamically react to new file content +type DynamicFileSNIContent struct { + *DynamicCertKeyPairContent + sniNames []string +} + +var _ Notifier = &DynamicFileSNIContent{} +var _ SNICertKeyContentProvider = &DynamicFileSNIContent{} +var _ ControllerRunner = &DynamicFileSNIContent{} + +// NewDynamicSNIContentFromFiles returns a dynamic SNICertKeyContentProvider based on a cert and key filename and explicit names +func NewDynamicSNIContentFromFiles(purpose, certFile, keyFile string, sniNames ...string) (*DynamicFileSNIContent, error) { + servingContent, err := NewDynamicServingContentFromFiles(purpose, certFile, keyFile) + if err != nil { + return nil, err + } + + ret := &DynamicFileSNIContent{ + DynamicCertKeyPairContent: servingContent, + sniNames: sniNames, + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// SNINames returns explicitly set SNI names for the certificate. These are not dynamic. +func (c *DynamicFileSNIContent) SNINames() []string { + return c.sniNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go new file mode 100644 index 000000000..8f55edec4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog/v2" +) + +// BuildNamedCertificates returns a map of *tls.Certificate by name. It's +// suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs +// is invalid. Returns nil if len(certs) == 0 +func (c *DynamicServingCertificateController) BuildNamedCertificates(sniCerts []sniCertKeyContent) (map[string]*tls.Certificate, error) { + nameToCertificate := map[string]*tls.Certificate{} + byNameExplicit := map[string]*tls.Certificate{} + + // Iterate backwards so that earlier certs take precedence in the names map + for i := len(sniCerts) - 1; i >= 0; i-- { + cert, err := tls.X509KeyPair(sniCerts[i].cert, sniCerts[i].key) + if err != nil { + return nil, fmt.Errorf("invalid SNI cert keypair [%d/%q]: %v", i, c.sniCerts[i].Name(), err) + } + + // error is not possible given above call to X509KeyPair + x509Cert, _ := x509.ParseCertificate(cert.Certificate[0]) + + names := sniCerts[i].sniNames + for _, name := range names { + byNameExplicit[name] = &cert + } + + klog.V(2).Infof("loaded SNI cert [%d/%q]: %s", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.sniCerts[i].Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "SNICertificateReload", "loaded SNI cert [%d/%q]: %s with explicit names %v", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert), names) + } + + if len(names) == 0 { + names = getCertificateNames(x509Cert) + for _, name := range names { + nameToCertificate[name] = &cert + } + } + } + + // Explicitly set names must override + for k, v := range byNameExplicit { + nameToCertificate[k] = v + } + + return nameToCertificate, nil +} + +// getCertificateNames returns names for an x509.Certificate. The names are +// suitable for use in tls.Config#NamedCertificates. +func getCertificateNames(cert *x509.Certificate) []string { + var names []string + + cn := cert.Subject.CommonName + cnIsIP := net.ParseIP(cn) != nil + cnIsValidDomain := cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 + // don't use the CN if it is a valid IP because our IP serving detection may unexpectedly use it to terminate the connection. + if !cnIsIP && cnIsValidDomain { + names = append(names, cn) + } + for _, san := range cert.DNSNames { + names = append(names, san) + } + // intentionally all IPs in the cert are ignored as SNI forbids passing IPs + // to select a cert. Before go 1.6 the tls happily passed IPs as SNI values. + + return names +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go new file mode 100644 index 000000000..c877dfe6c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go @@ -0,0 +1,114 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" +) + +type staticCAContent struct { + name string + caBundle *caBundleAndVerifier +} + +var _ CAContentProvider = &staticCAContent{} + +// NewStaticCAContent returns a CAContentProvider that always returns the same value +func NewStaticCAContent(name string, caBundle []byte) (CAContentProvider, error) { + caBundleAndVerifier, err := newCABundleAndVerifier(name, caBundle) + if err != nil { + return nil, err + } + + return &staticCAContent{ + name: name, + caBundle: caBundleAndVerifier, + }, nil +} + +// Name is just an identifier +func (c *staticCAContent) Name() string { + return c.name +} + +// CurrentCABundleContent provides ca bundle byte content +func (c *staticCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.caBundle +} + +func (c *staticCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + return c.caBundle.verifyOptions, true +} + +type staticCertKeyContent struct { + name string + cert []byte + key []byte +} + +type staticSNICertKeyContent struct { + staticCertKeyContent + sniNames []string +} + +// NewStaticCertKeyContent returns a CertKeyContentProvider that always returns the same value +func NewStaticCertKeyContent(name string, cert, key []byte) (CertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, nil +} + +// NewStaticSNICertKeyContent returns a SNICertKeyContentProvider that always returns the same value +func NewStaticSNICertKeyContent(name string, cert, key []byte, sniNames ...string) (SNICertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticSNICertKeyContent{ + staticCertKeyContent: staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, + sniNames: sniNames, + }, nil +} + +// Name is just an identifier +func (c *staticCertKeyContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key content +func (c *staticCertKeyContent) CurrentCertKeyContent() ([]byte, []byte) { + return c.cert, c.key +} + +func (c *staticSNICertKeyContent) SNINames() []string { + return c.sniNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go new file mode 100644 index 000000000..f637f3233 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -0,0 +1,284 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/events" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const workItemKey = "key" + +// DynamicServingCertificateController dynamically loads certificates and provides a golang tls compatible dynamic GetCertificate func. +type DynamicServingCertificateController struct { + // baseTLSConfig is the static portion of the tlsConfig for serving to clients. It is copied and the copy is mutated + // based on the dynamic cert state. + baseTLSConfig *tls.Config + + // clientCA provides the very latest content of the ca bundle + clientCA CAContentProvider + // servingCert provides the very latest content of the default serving certificate + servingCert CertKeyContentProvider + // sniCerts are a list of CertKeyContentProvider with associated names used for SNI + sniCerts []SNICertKeyContentProvider + + // currentlyServedContent holds the original bytes that we are serving. This is used to decide if we need to set a + // new atomic value. The types used for efficient TLSConfig preclude using the processed value. + currentlyServedContent *dynamicCertificateContent + // currentServingTLSConfig holds a *tls.Config that will be used to serve requests + currentServingTLSConfig atomic.Value + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface + eventRecorder events.EventRecorder +} + +var _ Listener = &DynamicServingCertificateController{} + +// NewDynamicServingCertificateController returns a controller that can be used to keep a TLSConfig up to date. 
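+// The returned controller is not ready to serve until RunOnce or Run has populated the current TLS config;
+// GetConfigForClient returns an error until then.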
+func NewDynamicServingCertificateController( + baseTLSConfig *tls.Config, + clientCA CAContentProvider, + servingCert CertKeyContentProvider, + sniCerts []SNICertKeyContentProvider, + eventRecorder events.EventRecorder, +) *DynamicServingCertificateController { + c := &DynamicServingCertificateController{ + baseTLSConfig: baseTLSConfig, + clientCA: clientCA, + servingCert: servingCert, + sniCerts: sniCerts, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"), + eventRecorder: eventRecorder, + } + + return c +} + +// GetConfigForClient is an implementation of tls.Config.GetConfigForClient +func (c *DynamicServingCertificateController) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { + uncastObj := c.currentServingTLSConfig.Load() + if uncastObj == nil { + return nil, errors.New("dynamiccertificates: configuration not ready") + } + tlsConfig, ok := uncastObj.(*tls.Config) + if !ok { + return nil, errors.New("dynamiccertificates: unexpected config type") + } + + tlsConfigCopy := tlsConfig.Clone() + + // if the client set SNI information, just use our "normal" SNI flow + if len(clientHello.ServerName) > 0 { + return tlsConfigCopy, nil + } + + // if the client didn't set SNI, then we need to inspect the requested IP so that we can choose + // a certificate from our list if we specifically handle that IP. This can happen when an IP is specifically mapped by name. + host, _, err := net.SplitHostPort(clientHello.Conn.LocalAddr().String()) + if err != nil { + return tlsConfigCopy, nil + } + + ipCert, ok := tlsConfigCopy.NameToCertificate[host] + if !ok { + return tlsConfigCopy, nil + } + tlsConfigCopy.Certificates = []tls.Certificate{*ipCert} + tlsConfigCopy.NameToCertificate = nil + + return tlsConfigCopy, nil +} + +// newTLSContent determines the next set of content for overriding the baseTLSConfig. +func (c *DynamicServingCertificateController) newTLSContent() (*dynamicCertificateContent, error) { + newContent := &dynamicCertificateContent{} + + if c.clientCA != nil { + currClientCABundle := c.clientCA.CurrentCABundleContent() + // we allow removing all client ca bundles because the server is still secure when this happens. it just means + // that there isn't a hint to clients about which client-cert to used. this happens when there is no client-ca + // yet known for authentication, which can happen in aggregated apiservers and some kube-apiserver deployment modes. + newContent.clientCA = caBundleContent{caBundle: currClientCABundle} + } + + if c.servingCert != nil { + currServingCert, currServingKey := c.servingCert.CurrentCertKeyContent() + if len(currServingCert) == 0 || len(currServingKey) == 0 { + return nil, fmt.Errorf("not loading an empty serving certificate from %q", c.servingCert.Name()) + } + + newContent.servingCert = certKeyContent{cert: currServingCert, key: currServingKey} + } + + for i, sniCert := range c.sniCerts { + currCert, currKey := sniCert.CurrentCertKeyContent() + if len(currCert) == 0 || len(currKey) == 0 { + return nil, fmt.Errorf("not loading an empty SNI certificate from %d/%q", i, sniCert.Name()) + } + + newContent.sniCerts = append(newContent.sniCerts, sniCertKeyContent{certKeyContent: certKeyContent{cert: currCert, key: currKey}, sniNames: sniCert.SNINames()}) + } + + return newContent, nil +} + +// syncCerts gets newTLSContent, if it has changed from the existing, the content is parsed and stored for usage in +// GetConfigForClient. 
+func (c *DynamicServingCertificateController) syncCerts() error { + newContent, err := c.newTLSContent() + if err != nil { + return err + } + // if the content is the same as what we currently have, we can simply skip it. This works because we are single + // threaded. If you ever make this multi-threaded, add a lock. + if newContent.Equal(c.currentlyServedContent) { + return nil + } + + // make a shallow copy and override the dynamic pieces which have changed. + newTLSConfigCopy := c.baseTLSConfig.Clone() + + // parse new content to add to TLSConfig + if len(newContent.clientCA.caBundle) > 0 { + newClientCAPool := x509.NewCertPool() + newClientCAs, err := cert.ParseCertsPEM(newContent.clientCA.caBundle) + if err != nil { + return fmt.Errorf("unable to load client CA file %q: %v", string(newContent.clientCA.caBundle), err) + } + for i, cert := range newClientCAs { + klog.V(2).Infof("loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.clientCA.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "CACertificateReload", "loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert)) + } + + newClientCAPool.AddCert(cert) + } + + newTLSConfigCopy.ClientCAs = newClientCAPool + } + + if len(newContent.servingCert.cert) > 0 && len(newContent.servingCert.key) > 0 { + cert, err := tls.X509KeyPair(newContent.servingCert.cert, newContent.servingCert.key) + if err != nil { + return fmt.Errorf("invalid serving cert keypair: %v", err) + } + + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("invalid serving cert: %v", err) + } + + klog.V(2).Infof("loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.servingCert.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "ServingCertificateReload", "loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert)) + } + + newTLSConfigCopy.Certificates = []tls.Certificate{cert} + } + + if len(newContent.sniCerts) > 0 { + newTLSConfigCopy.NameToCertificate, err = c.BuildNamedCertificates(newContent.sniCerts) + if err != nil { + return fmt.Errorf("unable to build named certificate map: %v", err) + } + + // append all named certs. Otherwise, the go tls stack will think no SNI processing + // is necessary because there is only one cert anyway. + // Moreover, if servingCert is not set, the first SNI + // cert will become the default cert. That's what we expect anyway. + for _, sniCert := range newTLSConfigCopy.NameToCertificate { + newTLSConfigCopy.Certificates = append(newTLSConfigCopy.Certificates, *sniCert) + } + } + + // store new values of content for serving. + c.currentServingTLSConfig.Store(newTLSConfigCopy) + c.currentlyServedContent = newContent // this is single threaded, so we have no locking issue + + return nil +} + +// RunOnce runs a single sync step to ensure that we have a valid starting configuration. +func (c *DynamicServingCertificateController) RunOnce() error { + return c.syncCerts() +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. 
+func (c *DynamicServingCertificateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting DynamicServingCertificateController") + defer klog.Infof("Shutting down DynamicServingCertificateController") + + // synchronously load once. We will trigger again, so ignoring any error is fine + _ = c.RunOnce() + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. + go wait.Until(func() { + c.Enqueue() + }, 1*time.Minute, stopCh) + + <-stopCh +} + +func (c *DynamicServingCertificateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicServingCertificateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncCerts() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Enqueue a method to allow separate control loops to cause the certificate controller to trigger and read content. +func (c *DynamicServingCertificateController) Enqueue() { + c.queue.Add(workItemKey) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go new file mode 100644 index 000000000..89e19ea5a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiccertificates + +import ( + "bytes" + "crypto/x509" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type unionCAContent []CAContentProvider + +var _ Notifier = &unionCAContent{} +var _ CAContentProvider = &unionCAContent{} +var _ ControllerRunner = &unionCAContent{} + +// NewUnionCAContentProvider returns a CAContentProvider that is a union of other CAContentProviders +func NewUnionCAContentProvider(caContentProviders ...CAContentProvider) CAContentProvider { + return unionCAContent(caContentProviders) +} + +// Name is just an identifier +func (c unionCAContent) Name() string { + names := []string{} + for _, curr := range c { + names = append(names, curr.Name()) + } + return strings.Join(names, ",") +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) CurrentCABundleContent() []byte { + caBundles := [][]byte{} + for _, curr := range c { + if currCABytes := curr.CurrentCABundleContent(); len(currCABytes) > 0 { + caBundles = append(caBundles, []byte(strings.TrimSpace(string(currCABytes)))) + } + } + + return bytes.Join(caBundles, []byte("\n")) +} + +// CurrentCABundleContent provides ca bundle byte content +func (c unionCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + currCABundle := c.CurrentCABundleContent() + if len(currCABundle) == 0 { + return x509.VerifyOptions{}, false + } + + // TODO make more efficient. This isn't actually used in any of our mainline paths. It's called to build the TLSConfig + // TODO on file changes, but the actual authentication runs against the individual items, not the union. + ret, err := newCABundleAndVerifier(c.Name(), c.CurrentCABundleContent()) + if err != nil { + // because we're made up of already vetted values, this indicates some kind of coding error + panic(err) + } + + return ret.verifyOptions, true +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) AddListener(listener Listener) { + for _, curr := range c { + if notifier, ok := curr.(Notifier); ok { + notifier.AddListener(listener) + } + } +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c unionCAContent) RunOnce() error { + errors := []error{} + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + if err := controller.RunOnce(); err != nil { + errors = append(errors, err) + } + } + } + + return utilerrors.NewAggregate(errors) +} + +// Run runs the controller +func (c unionCAContent) Run(workers int, stopCh <-chan struct{}) { + for _, curr := range c { + if controller, ok := curr.(ControllerRunner); ok { + go controller.Run(workers, stopCh) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go new file mode 100644 index 000000000..6906045cd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/x509" + "fmt" + "strings" + "time" +) + +// GetHumanCertDetail is a convenient method for printing compact details of certificate that helps when debugging +// kube-apiserver usage of certs. +func GetHumanCertDetail(certificate *x509.Certificate) string { + humanName := certificate.Subject.CommonName + signerHumanName := certificate.Issuer.CommonName + if certificate.Subject.CommonName == certificate.Issuer.CommonName { + signerHumanName = "" + } + + usages := []string{} + for _, curr := range certificate.ExtKeyUsage { + if curr == x509.ExtKeyUsageClientAuth { + usages = append(usages, "client") + continue + } + if curr == x509.ExtKeyUsageServerAuth { + usages = append(usages, "serving") + continue + } + + usages = append(usages, fmt.Sprintf("%d", curr)) + } + + validServingNames := []string{} + for _, ip := range certificate.IPAddresses { + validServingNames = append(validServingNames, ip.String()) + } + for _, dnsName := range certificate.DNSNames { + validServingNames = append(validServingNames, dnsName) + } + servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, servingString, signerHumanName, certificate.NotBefore.UTC(), certificate.NotAfter.UTC(), + time.Now().UTC()) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index b8b1d67a5..b7e378e43 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -22,6 +22,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/apis/apiserver/install" @@ -32,6 +33,10 @@ import ( var cfgScheme = runtime.NewScheme() +// validEgressSelectorNames contains the set of valid egress selctor names. +// 'master' is deprecated in favor of 'controlplane' and will be removed in v1.22. 
+var validEgressSelectorNames = sets.NewString("master", "controlplane", "cluster", "etcd") + func init() { install.Install(cfgScheme) } @@ -97,6 +102,30 @@ func ValidateEgressSelectorConfiguration(config *apiserver.EgressSelectorConfigu })) } } + + var foundControlPlane, foundMaster bool + for _, service := range config.EgressSelections { + canonicalName := strings.ToLower(service.Name) + + if !validEgressSelectorNames.Has(canonicalName) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("egressSelection", "name"), canonicalName, validEgressSelectorNames.List())) + continue + } + + if canonicalName == "master" { + foundMaster = true + } + + if canonicalName == "controlplane" { + foundControlPlane = true + } + } + + // error if both master and controlplane egress selectors are set + if foundMaster && foundControlPlane { + allErrs = append(allErrs, field.Forbidden(field.NewPath("egressSelection", "name"), "both egressSelection names 'master' and 'controlplane' are specified, only one is allowed")) + } + return allErrs } @@ -199,7 +228,7 @@ func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) fiel return allErrs } if tlsConfig.CABundle != "" { - if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.CABundle); exists == false || err != nil { + if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.CABundle); !exists || err != nil { allErrs = append(allErrs, field.Invalid( fldPath.Child("tlsConfig", "caBundle"), tlsConfig.CABundle, @@ -211,7 +240,7 @@ func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) fiel fldPath.Child("tlsConfig", "clientCert"), "nil", "Using TLS requires clientCert")) - } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientCert); exists == false || err != nil { + } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientCert); !exists || err != nil { allErrs = append(allErrs, field.Invalid( fldPath.Child("tlsConfig", "clientCert"), tlsConfig.ClientCert, @@ -222,7 +251,7 @@ func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) fiel fldPath.Child("tlsConfig", "clientKey"), "nil", "Using TLS requires requires clientKey")) - } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientKey); exists == false || err != nil { + } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientKey); !exists || err != nil { allErrs = append(allErrs, field.Invalid( fldPath.Child("tlsConfig", "clientKey"), tlsConfig.ClientKey, diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index a41a85403..a849575b8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -51,8 +51,8 @@ type EgressSelector struct { type EgressType int const ( - // Master is the EgressType for traffic intended to go to the control plane. - Master EgressType = iota + // ControlPlane is the EgressType for traffic intended to go to the control plane. + ControlPlane EgressType = iota // Etcd is the EgressType for traffic intended to go to Kubernetes persistence store. Etcd // Cluster is the EgressType for traffic intended to go to the system being managed by Kubernetes. 
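As a rough illustration of the validation added above, a hypothetical sketch (using the config and validation functions referenced in this hunk, not code from the patch itself): a configuration that names both the deprecated 'master' selector and its replacement 'controlplane' should now be rejected with a Forbidden error.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/apiserver"
	"k8s.io/apiserver/pkg/server/egressselector"
)

func main() {
	// Hypothetical configuration naming both the deprecated 'master' selector
	// and its replacement 'controlplane'; the new check is expected to append
	// a Forbidden error for this combination.
	cfg := &apiserver.EgressSelectorConfiguration{
		EgressSelections: []apiserver.EgressSelection{
			{Name: "master"},
			{Name: "controlplane"},
		},
	}
	for _, err := range egressselector.ValidateEgressSelectorConfiguration(cfg) {
		fmt.Println(err)
	}
}

With only 'master' configured, lookupServiceName (next hunk) logs a deprecation warning and maps the name to the ControlPlane egress type.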
@@ -73,8 +73,8 @@ type Lookup func(networkContext NetworkContext) (utilnet.DialFunc, error) // String returns the canonical string representation of the egress type func (s EgressType) String() string { switch s { - case Master: - return "master" + case ControlPlane: + return "controlplane" case Etcd: return "etcd" case Cluster: @@ -91,8 +91,12 @@ func (s EgressType) AsNetworkContext() NetworkContext { func lookupServiceName(name string) (EgressType, error) { switch strings.ToLower(name) { + // 'master' is deprecated, interpret "master" as controlplane internally until removed in v1.22. case "master": - return Master, nil + klog.Warning("EgressSelection name 'master' is deprecated, use 'controlplane' instead") + return ControlPlane, nil + case "controlplane": + return ControlPlane, nil case "etcd": return Etcd, nil case "cluster": @@ -364,5 +368,6 @@ func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFun // The round trip wrapper will over-ride the dialContext method appropriately return nil, nil } + return cs.egressToDialer[networkContext.EgressSelectionName], nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS b/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS new file mode 100644 index 000000000..c5f73991a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts +- dims diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go b/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go new file mode 100644 index 000000000..65c73fcdc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/content_type.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import "net/http" + +// WithContentType sets both the Content-Type and the X-Content-Type-Options (nosniff) header +func WithContentType(handler http.Handler, contentType string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", contentType) + w.Header().Set("X-Content-Type-Options", "nosniff") + handler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/cors.go b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go new file mode 100644 index 000000000..67df76098 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go @@ -0,0 +1,98 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "net/http" + "regexp" + "strings" + + "k8s.io/klog/v2" +) + +// TODO: use restful.CrossOriginResourceSharing +// See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and +// github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go +// Or, for a more detailed implementation use https://github.com/martini-contrib/cors +// or implement CORS at your proxy layer. + +// WithCORS is a simple CORS implementation that wraps an http Handler. +// Pass nil for allowedMethods and allowedHeaders to use the defaults. If allowedOriginPatterns +// is empty or nil, no CORS support is installed. +func WithCORS(handler http.Handler, allowedOriginPatterns []string, allowedMethods []string, allowedHeaders []string, exposedHeaders []string, allowCredentials string) http.Handler { + if len(allowedOriginPatterns) == 0 { + return handler + } + allowedOriginPatternsREs := allowedOriginRegexps(allowedOriginPatterns) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + origin := req.Header.Get("Origin") + if origin != "" { + allowed := false + for _, re := range allowedOriginPatternsREs { + if allowed = re.MatchString(origin); allowed { + break + } + } + if allowed { + w.Header().Set("Access-Control-Allow-Origin", origin) + // Set defaults for methods and headers if nothing was passed + if allowedMethods == nil { + allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE", "PATCH"} + } + if allowedHeaders == nil { + allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"} + } + if exposedHeaders == nil { + exposedHeaders = []string{"Date"} + } + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", ")) + w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", ")) + w.Header().Set("Access-Control-Expose-Headers", strings.Join(exposedHeaders, ", ")) + w.Header().Set("Access-Control-Allow-Credentials", allowCredentials) + + // Stop here if its a preflight OPTIONS request + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusNoContent) + return + } + } + } + // Dispatch to the next handler + handler.ServeHTTP(w, req) + }) +} + +func allowedOriginRegexps(allowedOrigins []string) []*regexp.Regexp { + res, err := compileRegexps(allowedOrigins) + if err != nil { + klog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) + } + return res +} + +// Takes a list of strings and compiles them into a list of regular expressions +func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/doc.go b/vendor/k8s.io/apiserver/pkg/server/filters/doc.go new file mode 100644 index 000000000..a90cc3b49 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filters contains all the http handler chain filters which +// are not api related. +package filters // import "k8s.io/apiserver/pkg/server/filters" diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go b/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go new file mode 100644 index 000000000..065bd22bb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/goaway.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "math/rand" + "net/http" + "sync" +) + +// GoawayDecider decides if server should send a GOAWAY +type GoawayDecider interface { + Goaway(r *http.Request) bool +} + +var ( + // randPool used to get a rand.Rand and generate a random number thread-safely, + // which improve the performance of using rand.Rand with a locker + randPool = &sync.Pool{ + New: func() interface{} { + return rand.New(rand.NewSource(rand.Int63())) + }, + } +) + +// WithProbabilisticGoaway returns an http.Handler that send GOAWAY probabilistically +// according to the given chance for HTTP2 requests. After client receive GOAWAY, +// the in-flight long-running requests will not be influenced, and the new requests +// will use a new TCP connection to re-balancing to another server behind the load balance. +func WithProbabilisticGoaway(inner http.Handler, chance float64) http.Handler { + return &goaway{ + handler: inner, + decider: &probabilisticGoawayDecider{ + chance: chance, + next: func() float64 { + rnd := randPool.Get().(*rand.Rand) + ret := rnd.Float64() + randPool.Put(rnd) + return ret + }, + }, + } +} + +// goaway send a GOAWAY to client according to decider for HTTP2 requests +type goaway struct { + handler http.Handler + decider GoawayDecider +} + +// ServeHTTP implement HTTP handler +func (h *goaway) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Proto == "HTTP/2.0" && h.decider.Goaway(r) { + // Send a GOAWAY and tear down the TCP connection when idle. 
+ w.Header().Set("Connection", "close") + } + + h.handler.ServeHTTP(w, r) +} + +// probabilisticGoawayDecider send GOAWAY probabilistically according to chance +type probabilisticGoawayDecider struct { + chance float64 + next func() float64 +} + +// Goaway implement GoawayDecider +func (p *probabilisticGoawayDecider) Goaway(r *http.Request) bool { + return p.next() < p.chance +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go b/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go new file mode 100644 index 000000000..1b58f1638 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// BasicLongRunningRequestCheck returns true if the given request has one of the specified verbs or one of the specified subresources, or is a profiler request. +func BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) apirequest.LongRunningRequestCheck { + return func(r *http.Request, requestInfo *apirequest.RequestInfo) bool { + if longRunningVerbs.Has(requestInfo.Verb) { + return true + } + if requestInfo.IsResourceRequest && longRunningSubresources.Has(requestInfo.Subresource) { + return true + } + if !requestInfo.IsResourceRequest && strings.HasPrefix(requestInfo.Path, "/debug/pprof/") { + return true + } + return false + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go new file mode 100644 index 000000000..e873351c7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + + "k8s.io/klog/v2" +) + +const ( + // Constant for the retry-after interval on rate limiting. + // TODO: maybe make this dynamic? or user-adjustable? + retryAfter = "1" + + // How often inflight usage metric should be updated. 
Because + // the metrics tracks maximal value over period making this + // longer will increase the metric value. + inflightUsageMetricUpdatePeriod = time.Second + + // How often to run maintenance on observations to ensure + // that they do not fall too far behind. + observationMaintenancePeriod = 10 * time.Second +) + +var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") + +func handleError(w http.ResponseWriter, r *http.Request, err error) { + errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) + http.Error(w, errorMsg, http.StatusInternalServerError) + klog.Errorf(err.Error()) +} + +// requestWatermark is used to track maximal numbers of requests in a particular phase of handling +type requestWatermark struct { + phase string + readOnlyObserver, mutatingObserver fcmetrics.TimedObserver + lock sync.Mutex + readOnlyWatermark, mutatingWatermark int +} + +func (w *requestWatermark) recordMutating(mutatingVal int) { + w.mutatingObserver.Set(float64(mutatingVal)) + + w.lock.Lock() + defer w.lock.Unlock() + + if w.mutatingWatermark < mutatingVal { + w.mutatingWatermark = mutatingVal + } +} + +func (w *requestWatermark) recordReadOnly(readOnlyVal int) { + w.readOnlyObserver.Set(float64(readOnlyVal)) + + w.lock.Lock() + defer w.lock.Unlock() + + if w.readOnlyWatermark < readOnlyVal { + w.readOnlyWatermark = readOnlyVal + } +} + +// watermark tracks requests being executed (not waiting in a queue) +var watermark = &requestWatermark{ + phase: metrics.ExecutingPhase, + readOnlyObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.ReadOnlyKind}).RequestsExecuting, + mutatingObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.MutatingKind}).RequestsExecuting, +} + +// startWatermarkMaintenance starts the goroutines to observe and maintain the specified watermark. +func startWatermarkMaintenance(watermark *requestWatermark, stopCh <-chan struct{}) { + // Periodically update the inflight usage metric. + go wait.Until(func() { + watermark.lock.Lock() + readOnlyWatermark := watermark.readOnlyWatermark + mutatingWatermark := watermark.mutatingWatermark + watermark.readOnlyWatermark = 0 + watermark.mutatingWatermark = 0 + watermark.lock.Unlock() + + metrics.UpdateInflightRequestMetrics(watermark.phase, readOnlyWatermark, mutatingWatermark) + }, inflightUsageMetricUpdatePeriod, stopCh) + + // Periodically observe the watermarks. This is done to ensure that they do not fall too far behind. When they do + // fall too far behind, then there is a long delay in responding to the next request received while the observer + // catches back up. + go wait.Until(func() { + watermark.readOnlyObserver.Add(0) + watermark.mutatingObserver.Add(0) + }, observationMaintenancePeriod, stopCh) +} + +// WithMaxInFlightLimit limits the number of in-flight requests to buffer size of the passed in channel. 
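The WithMaxInFlightLimit filter defined next expects RequestInfo to already be present in the request context, so it is normally composed with a RequestInfo filter. A hypothetical composition sketch follows; WithRequestInfo and RequestInfoFactory come from k8s.io/apiserver/pkg/endpoints, which is an assumption outside this hunk, and the limits, verbs, and subresources are illustrative placeholders.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/sets"
	endpointsfilters "k8s.io/apiserver/pkg/endpoints/filters"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
	genericfilters "k8s.io/apiserver/pkg/server/filters"
)

// withInflightLimits sketches one way to compose the filter: RequestInfo must be
// resolved before the in-flight limit runs, so WithRequestInfo wraps the outside.
func withInflightLimits(h http.Handler) http.Handler {
	longRunning := genericfilters.BasicLongRunningRequestCheck(
		sets.NewString("watch"),
		sets.NewString("proxy", "log", "exec"),
	)
	h = genericfilters.WithMaxInFlightLimit(h, 400, 200, longRunning)

	resolver := &apirequest.RequestInfoFactory{
		APIPrefixes:          sets.NewString("api", "apis"),
		GrouplessAPIPrefixes: sets.NewString("api"),
	}
	return endpointsfilters.WithRequestInfo(h, resolver)
}

Requests turned away by the limit receive a 429 with a Retry-After header, except for members of the system privileged group, which the function below always serves.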
+func WithMaxInFlightLimit( + handler http.Handler, + nonMutatingLimit int, + mutatingLimit int, + longRunningRequestCheck apirequest.LongRunningRequestCheck, +) http.Handler { + if nonMutatingLimit == 0 && mutatingLimit == 0 { + return handler + } + var nonMutatingChan chan bool + var mutatingChan chan bool + if nonMutatingLimit != 0 { + nonMutatingChan = make(chan bool, nonMutatingLimit) + watermark.readOnlyObserver.SetX1(float64(nonMutatingLimit)) + } + if mutatingLimit != 0 { + mutatingChan = make(chan bool, mutatingLimit) + watermark.mutatingObserver.SetX1(float64(mutatingLimit)) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong")) + return + } + + // Skip tracking long running events. + if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) { + handler.ServeHTTP(w, r) + return + } + + var c chan bool + isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) + if isMutatingRequest { + c = mutatingChan + } else { + c = nonMutatingChan + } + + if c == nil { + handler.ServeHTTP(w, r) + } else { + + select { + case c <- true: + // We note the concurrency level both while the + // request is being served and after it is done being + // served, because both states contribute to the + // sampled stats on concurrency. + if isMutatingRequest { + watermark.recordMutating(len(c)) + } else { + watermark.recordReadOnly(len(c)) + } + defer func() { + <-c + if isMutatingRequest { + watermark.recordMutating(len(c)) + } else { + watermark.recordReadOnly(len(c)) + } + }() + handler.ServeHTTP(w, r) + + default: + // at this point we're about to return a 429, BUT not all actors should be rate limited. A system:master is so powerful + // that they should always get an answer. It's a super-admin or a loopback connection. + if currUser, ok := apirequest.UserFrom(ctx); ok { + for _, group := range currUser.GetGroups() { + if group == user.SystemPrivilegedGroup { + handler.ServeHTTP(w, r) + return + } + } + } + // We need to split this data between buckets used for throttling. + if isMutatingRequest { + metrics.DroppedRequests.WithLabelValues(metrics.MutatingKind).Inc() + } else { + metrics.DroppedRequests.WithLabelValues(metrics.ReadOnlyKind).Inc() + } + metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests) + tooManyRequests(r, w) + } + } + }) +} + +// StartMaxInFlightWatermarkMaintenance starts the goroutines to observe and maintain watermarks for max-in-flight +// requests. +func StartMaxInFlightWatermarkMaintenance(stopCh <-chan struct{}) { + startWatermarkMaintenance(watermark, stopCh) +} + +func tooManyRequests(req *http.Request, w http.ResponseWriter) { + // Return a 429 status indicating "Too Many Requests" + w.Header().Set("Retry-After", retryAfter) + http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go new file mode 100644 index 000000000..e1d7b7793 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -0,0 +1,155 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + apitypes "k8s.io/apimachinery/pkg/types" + epmetrics "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/klog/v2" +) + +type priorityAndFairnessKeyType int + +const priorityAndFairnessKey priorityAndFairnessKeyType = iota + +// PriorityAndFairnessClassification identifies the results of +// classification for API Priority and Fairness +type PriorityAndFairnessClassification struct { + FlowSchemaName string + FlowSchemaUID apitypes.UID + PriorityLevelName string + PriorityLevelUID apitypes.UID +} + +// GetClassification returns the classification associated with the +// given context, if any, otherwise nil +func GetClassification(ctx context.Context) *PriorityAndFairnessClassification { + return ctx.Value(priorityAndFairnessKey).(*PriorityAndFairnessClassification) +} + +// waitingMark tracks requests waiting rather than being executed +var waitingMark = &requestWatermark{ + phase: epmetrics.WaitingPhase, + readOnlyObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{epmetrics.ReadOnlyKind}).RequestsWaiting, + mutatingObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{epmetrics.MutatingKind}).RequestsWaiting, +} + +var atomicMutatingExecuting, atomicReadOnlyExecuting int32 +var atomicMutatingWaiting, atomicReadOnlyWaiting int32 + +// WithPriorityAndFairness limits the number of in-flight +// requests in a fine-grained way. +func WithPriorityAndFairness( + handler http.Handler, + longRunningRequestCheck apirequest.LongRunningRequestCheck, + fcIfc utilflowcontrol.Interface, +) http.Handler { + if fcIfc == nil { + klog.Warningf("priority and fairness support not found, skipping") + return handler + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no RequestInfo found in context")) + return + } + user, ok := apirequest.UserFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no User found in context")) + return + } + + // Skip tracking long running requests. 
+ if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) { + klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user) + handler.ServeHTTP(w, r) + return + } + + var classification *PriorityAndFairnessClassification + note := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration) { + classification = &PriorityAndFairnessClassification{ + FlowSchemaName: fs.Name, + FlowSchemaUID: fs.UID, + PriorityLevelName: pl.Name, + PriorityLevelUID: pl.UID} + } + + var served bool + isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) + noteExecutingDelta := func(delta int32) { + if isMutatingRequest { + watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta))) + } else { + watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta))) + } + } + noteWaitingDelta := func(delta int32) { + if isMutatingRequest { + waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta))) + } else { + waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta))) + } + } + execute := func() { + noteExecutingDelta(1) + defer noteExecutingDelta(-1) + served = true + innerCtx := context.WithValue(ctx, priorityAndFairnessKey, classification) + innerReq := r.Clone(innerCtx) + w.Header().Set(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID, string(classification.PriorityLevelUID)) + w.Header().Set(flowcontrol.ResponseHeaderMatchedFlowSchemaUID, string(classification.FlowSchemaUID)) + handler.ServeHTTP(w, innerReq) + } + digest := utilflowcontrol.RequestDigest{RequestInfo: requestInfo, User: user} + fcIfc.Handle(ctx, digest, note, func(inQueue bool) { + if inQueue { + noteWaitingDelta(1) + } else { + noteWaitingDelta(-1) + } + }, execute) + if !served { + if isMutatingRequest { + epmetrics.DroppedRequests.WithLabelValues(epmetrics.MutatingKind).Inc() + } else { + epmetrics.DroppedRequests.WithLabelValues(epmetrics.ReadOnlyKind).Inc() + } + epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests) + tooManyRequests(r, w) + } + + }) +} + +// StartPriorityAndFairnessWatermarkMaintenance starts the goroutines to observe and maintain watermarks for +// priority-and-fairness requests. +func StartPriorityAndFairnessWatermarkMaintenance(stopCh <-chan struct{}) { + startWatermarkMaintenance(watermark, stopCh) + startWatermarkMaintenance(waitingMark, stopCh) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go new file mode 100644 index 000000000..2405bfd1f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -0,0 +1,314 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "runtime" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by timeout. +func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, timeout time.Duration) http.Handler { + if longRunning == nil { + return handler + } + timeoutFunc := func(req *http.Request) (*http.Request, <-chan time.Time, func(), *apierrors.StatusError) { + // TODO unify this with apiserver.MaxInFlightLimit + ctx := req.Context() + + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no request info + return req, time.After(timeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) + } + + if longRunning(req, requestInfo) { + return req, nil, nil, nil + } + + ctx, cancel := context.WithCancel(ctx) + req = req.WithContext(ctx) + + postTimeoutFn := func() { + cancel() + metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout) + } + return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) + } + return WithTimeout(handler, timeoutFunc) +} + +type timeoutFunc = func(*http.Request) (req *http.Request, timeout <-chan time.Time, postTimeoutFunc func(), err *apierrors.StatusError) + +// WithTimeout returns an http.Handler that runs h with a timeout +// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle +// each request, but if a call runs for longer than its time limit, the +// handler responds with a 504 Gateway Timeout error and the message +// provided. (If msg is empty, a suitable default message will be sent.) After +// the handler times out, writes by h to its http.ResponseWriter will return +// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no +// timeout will be enforced. recordFn is a function that will be invoked whenever +// a timeout happens. +func WithTimeout(h http.Handler, timeoutFunc timeoutFunc) http.Handler { + return &timeoutHandler{h, timeoutFunc} +} + +type timeoutHandler struct { + handler http.Handler + timeout timeoutFunc +} + +func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r, after, postTimeoutFn, err := t.timeout(r) + if after == nil { + t.handler.ServeHTTP(w, r) + return + } + + // resultCh is used as both errCh and stopCh + resultCh := make(chan interface{}) + tw := newTimeoutWriter(w) + go func() { + defer func() { + err := recover() + // do not wrap the sentinel ErrAbortHandler panic value + if err != nil && err != http.ErrAbortHandler { + // Same as stdlib http server code. 
Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err = fmt.Sprintf("%v\n%s", err, buf) + } + resultCh <- err + }() + t.handler.ServeHTTP(tw, r) + }() + select { + case err := <-resultCh: + // panic if error occurs; stop otherwise + if err != nil { + panic(err) + } + return + case <-after: + defer func() { + // resultCh needs to have a reader, since the function doing + // the work needs to send to it. This is defer'd to ensure it runs + // ever if the post timeout work itself panics. + go func() { + res := <-resultCh + if res != nil { + switch t := res.(type) { + case error: + utilruntime.HandleError(t) + default: + utilruntime.HandleError(fmt.Errorf("%v", res)) + } + } + }() + }() + + postTimeoutFn() + tw.timeout(err) + } +} + +type timeoutWriter interface { + http.ResponseWriter + timeout(*apierrors.StatusError) +} + +func newTimeoutWriter(w http.ResponseWriter) timeoutWriter { + base := &baseTimeoutWriter{w: w} + + _, notifiable := w.(http.CloseNotifier) + _, hijackable := w.(http.Hijacker) + + switch { + case notifiable && hijackable: + return &closeHijackTimeoutWriter{base} + case notifiable: + return &closeTimeoutWriter{base} + case hijackable: + return &hijackTimeoutWriter{base} + default: + return base + } +} + +type baseTimeoutWriter struct { + w http.ResponseWriter + + mu sync.Mutex + // if the timeout handler has timeout + timedOut bool + // if this timeout writer has wrote header + wroteHeader bool + // if this timeout writer has been hijacked + hijacked bool +} + +func (tw *baseTimeoutWriter) Header() http.Header { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return http.Header{} + } + + return tw.w.Header() +} + +func (tw *baseTimeoutWriter) Write(p []byte) (int, error) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return 0, http.ErrHandlerTimeout + } + if tw.hijacked { + return 0, http.ErrHijacked + } + + tw.wroteHeader = true + return tw.w.Write(p) +} + +func (tw *baseTimeoutWriter) Flush() { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return + } + + if flusher, ok := tw.w.(http.Flusher); ok { + flusher.Flush() + } +} + +func (tw *baseTimeoutWriter) WriteHeader(code int) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut || tw.wroteHeader || tw.hijacked { + return + } + + tw.wroteHeader = true + tw.w.WriteHeader(code) +} + +func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) { + tw.mu.Lock() + defer tw.mu.Unlock() + + tw.timedOut = true + + // The timeout writer has not been used by the inner handler. + // We can safely timeout the HTTP request by sending by a timeout + // handler + if !tw.wroteHeader && !tw.hijacked { + tw.w.WriteHeader(http.StatusGatewayTimeout) + enc := json.NewEncoder(tw.w) + enc.Encode(&err.ErrStatus) + } else { + // The timeout writer has been used by the inner handler. There is + // no way to timeout the HTTP request at the point. We have to shutdown + // the connection for HTTP1 or reset stream for HTTP2. + // + // Note from the golang's docs: + // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes + // that the effect of the panic was isolated to the active request. + // It recovers the panic, logs a stack trace to the server error log, + // and either closes the network connection or sends an HTTP/2 + // RST_STREAM, depending on the HTTP protocol. 
To abort a handler so + // the client sees an interrupted response but the server doesn't log + // an error, panic with the value ErrAbortHandler. + // + // We are throwing http.ErrAbortHandler deliberately so that a client is notified and to suppress a not helpful stacktrace in the logs + panic(http.ErrAbortHandler) + } +} + +func (tw *baseTimeoutWriter) closeNotify() <-chan bool { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + done := make(chan bool) + close(done) + return done + } + + return tw.w.(http.CloseNotifier).CloseNotify() +} + +func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) { + tw.mu.Lock() + defer tw.mu.Unlock() + + if tw.timedOut { + return nil, nil, http.ErrHandlerTimeout + } + conn, rw, err := tw.w.(http.Hijacker).Hijack() + if err == nil { + tw.hijacked = true + } + return conn, rw, err +} + +type closeTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *closeTimeoutWriter) CloseNotify() <-chan bool { + return tw.closeNotify() +} + +type hijackTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return tw.hijack() +} + +type closeHijackTimeoutWriter struct { + *baseTimeoutWriter +} + +func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool { + return tw.closeNotify() +} + +func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return tw.hijack() +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go new file mode 100644 index 000000000..857ce1883 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "errors" + "fmt" + "net/http" + + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/client-go/kubernetes/scheme" +) + +// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown. +func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no request info + responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) + return + } + + if !longRunning(req, requestInfo) { + if err := wg.Add(1); err != nil { + // When apiserver is shutting down, signal clients to retry + // There is a good chance the client hit a different server, so a tight retry is good for client responsiveness. 
+ w.Header().Add("Retry-After", "1") + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Header().Set("X-Content-Type-Options", "nosniff") + statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() + w.WriteHeader(int(statusErr.Code)) + fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) + return + } + defer wg.Done() + } + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go new file mode 100644 index 000000000..34c5398db --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/klog/v2" +) + +// WithPanicRecovery wraps an http Handler to recover and log panics (except in the special case of http.ErrAbortHandler panics, which suppress logging). +func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolver) http.Handler { + return withPanicRecovery(handler, func(w http.ResponseWriter, req *http.Request, err interface{}) { + if err == http.ErrAbortHandler { + // Honor the http.ErrAbortHandler sentinel panic value + // + // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes + // that the effect of the panic was isolated to the active request. + // It recovers the panic, logs a stack trace to the server error log, + // and either closes the network connection or sends an HTTP/2 + // RST_STREAM, depending on the HTTP protocol. To abort a handler so + // the client sees an interrupted response but the server doesn't log + // an error, panic with the value ErrAbortHandler. + // + // Note that the ReallyCrash variable controls the behaviour of the HandleCrash function + // So it might actually crash, after calling the handlers + if info, err := resolver.NewRequestInfo(req); err != nil { + metrics.RecordRequestAbort(req, nil) + } else { + metrics.RecordRequestAbort(req, info) + } + // This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated + // in case the rate limit delays it. If you outrun the rate for this one timed out requests, something has gone + // seriously wrong with your server, but generally having a logging signal for timeouts is useful. + runtime.HandleError(fmt.Errorf("timeout or abort while handling: %v %q", req.Method, req.URL.Path)) + return + } + http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) + klog.Errorf("apiserver panic'd on %v %v", req.Method, req.RequestURI) + }) +} + +func withPanicRecovery(handler http.Handler, crashHandler func(http.ResponseWriter, *http.Request, interface{})) http.Handler { + handler = httplog.WithLogging(handler, httplog.DefaultStacktracePred) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + defer runtime.HandleCrash(func(err interface{}) { + crashHandler(w, req, err) + }) + + // Dispatch to the internal handler + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go new file mode 100644 index 000000000..d7d60b213 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -0,0 +1,632 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net/http" + gpath "path" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" + genericapi "k8s.io/apiserver/pkg/endpoints" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/routes" + "k8s.io/apiserver/pkg/storageversion" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utilopenapi "k8s.io/apiserver/pkg/util/openapi" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" + openapibuilder "k8s.io/kube-openapi/pkg/builder" + openapicommon "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/handler" + openapiutil "k8s.io/kube-openapi/pkg/util" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" +) + +// Info about an API group. +type APIGroupInfo struct { + PrioritizedVersions []schema.GroupVersion + // Info about the resources in this group. It's a map from version to resource to the storage. + VersionedResourcesStorageMap map[string]map[string]rest.Storage + // OptionsExternalVersion controls the APIVersion used for common objects in the + // schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may + // define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. + // If nil, defaults to groupMeta.GroupVersion. + // TODO: Remove this when https://github.com/kubernetes/kubernetes/issues/19018 is fixed. 
+ OptionsExternalVersion *schema.GroupVersion + // MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode + // common API implementations like ListOptions. Future changes will allow this to vary by group + // version (for when the inevitable meta/v2 group emerges). + MetaGroupVersion *schema.GroupVersion + + // Scheme includes all of the types used by this group and how to convert between them (or + // to convert objects from outside of this group that are accepted in this API). + // TODO: replace with interfaces + Scheme *runtime.Scheme + // NegotiatedSerializer controls how this group encodes and decodes data + NegotiatedSerializer runtime.NegotiatedSerializer + // ParameterCodec performs conversions for query parameters passed to API calls + ParameterCodec runtime.ParameterCodec + + // StaticOpenAPISpec is the spec derived from the definitions of all resources installed together. + // It is set during InstallAPIGroups, InstallAPIGroup, and InstallLegacyAPIGroup. + StaticOpenAPISpec *spec.Swagger +} + +// GenericAPIServer contains state for a Kubernetes cluster api server. +type GenericAPIServer struct { + // discoveryAddresses is used to build cluster IPs for discovery. + discoveryAddresses discovery.Addresses + + // LoopbackClientConfig is a config for a privileged loopback connection to the API server + LoopbackClientConfig *restclient.Config + + // minRequestTimeout is how short the request timeout can be. This is used to build the RESTHandler + minRequestTimeout time.Duration + + // ShutdownTimeout is the timeout used for server shutdown. This specifies the timeout before server + // gracefully shutdown returns. + ShutdownTimeout time.Duration + + // legacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests + // to InstallLegacyAPIGroup + legacyAPIGroupPrefixes sets.String + + // admissionControl is used to build the RESTStorage that backs an API Group. + admissionControl admission.Interface + + // SecureServingInfo holds configuration of the TLS server. + SecureServingInfo *SecureServingInfo + + // ExternalAddress is the address (hostname or IP and port) that should be used in + // external (public internet) URLs for this GenericAPIServer. + ExternalAddress string + + // Serializer controls how common API objects not in a group/version prefix are serialized for this server. + // Individual APIGroups may define their own serializers. + Serializer runtime.NegotiatedSerializer + + // "Outputs" + // Handler holds the handlers being used by this API server + Handler *APIServerHandler + + // listedPathProvider is a lister which provides the set of paths to show at / + listedPathProvider routes.ListedPathProvider + + // DiscoveryGroupManager serves /apis + DiscoveryGroupManager discovery.GroupManager + + // Enable swagger and/or OpenAPI if these configs are non-nil. + openAPIConfig *openapicommon.Config + + // OpenAPIVersionedService controls the /openapi/v2 endpoint, and can be used to update the served spec. + // It is set during PrepareRun. + OpenAPIVersionedService *handler.OpenAPIService + + // StaticOpenAPISpec is the spec derived from the restful container endpoints. + // It is set during PrepareRun. + StaticOpenAPISpec *spec.Swagger + + // PostStartHooks are each called after the server has started listening, in a separate go func for each + // with no guarantee of ordering between them. The map key is a name used for error reporting. 
+ // It may kill the process with a panic if it wishes to by returning an error. + postStartHookLock sync.Mutex + postStartHooks map[string]postStartHookEntry + postStartHooksCalled bool + disabledPostStartHooks sets.String + + preShutdownHookLock sync.Mutex + preShutdownHooks map[string]preShutdownHookEntry + preShutdownHooksCalled bool + + // healthz checks + healthzLock sync.Mutex + healthzChecks []healthz.HealthChecker + healthzChecksInstalled bool + // livez checks + livezLock sync.Mutex + livezChecks []healthz.HealthChecker + livezChecksInstalled bool + // readyz checks + readyzLock sync.Mutex + readyzChecks []healthz.HealthChecker + readyzChecksInstalled bool + livezGracePeriod time.Duration + livezClock clock.Clock + // the readiness stop channel is used to signal that the apiserver has initiated a shutdown sequence, this + // will cause readyz to return unhealthy. + readinessStopCh chan struct{} + + // auditing. The backend is started after the server starts listening. + AuditBackend audit.Backend + + // Authorizer determines whether a user is allowed to make a certain request. The Handler does a preliminary + // authorization check using the request URI but it may be necessary to make additional checks, such as in + // the create-on-update case + Authorizer authorizer.Authorizer + + // EquivalentResourceRegistry provides information about resources equivalent to a given resource, + // and the kind associated with a given resource. As resources are installed, they are registered here. + EquivalentResourceRegistry runtime.EquivalentResourceRegistry + + // delegationTarget is the next delegate in the chain. This is never nil. + delegationTarget DelegationTarget + + // HandlerChainWaitGroup allows you to wait for all chain handlers finish after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + + // ShutdownDelayDuration allows to block shutdown for some time, e.g. until endpoints pointing to this API server + // have converged on all node. During this time, the API server keeps serving, /healthz will return 200, + // but /readyz will return failure. + ShutdownDelayDuration time.Duration + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. + maxRequestBodyBytes int64 + + // APIServerID is the ID of this API server + APIServerID string + + // StorageVersionManager holds the storage versions of the API resources installed by this server. + StorageVersionManager storageversion.Manager +} + +// DelegationTarget is an interface which allows for composition of API servers with top level handling that works +// as expected. +type DelegationTarget interface { + // UnprotectedHandler returns a handler that is NOT protected by a normal chain + UnprotectedHandler() http.Handler + + // PostStartHooks returns the post-start hooks that need to be combined + PostStartHooks() map[string]postStartHookEntry + + // PreShutdownHooks returns the pre-stop hooks that need to be combined + PreShutdownHooks() map[string]preShutdownHookEntry + + // HealthzChecks returns the healthz checks that need to be combined + HealthzChecks() []healthz.HealthChecker + + // ListedPaths returns the paths for supporting an index + ListedPaths() []string + + // NextDelegate returns the next delegationTarget in the chain of delegations + NextDelegate() DelegationTarget + + // PrepareRun does post API installation setup steps. It calls recursively the same function of the delegates. 
+ PrepareRun() preparedGenericAPIServer +} + +func (s *GenericAPIServer) UnprotectedHandler() http.Handler { + // when we delegate, we need the server we're delegating to choose whether or not to use gorestful + return s.Handler.Director +} +func (s *GenericAPIServer) PostStartHooks() map[string]postStartHookEntry { + return s.postStartHooks +} +func (s *GenericAPIServer) PreShutdownHooks() map[string]preShutdownHookEntry { + return s.preShutdownHooks +} +func (s *GenericAPIServer) HealthzChecks() []healthz.HealthChecker { + return s.healthzChecks +} +func (s *GenericAPIServer) ListedPaths() []string { + return s.listedPathProvider.ListedPaths() +} + +func (s *GenericAPIServer) NextDelegate() DelegationTarget { + return s.delegationTarget +} + +type emptyDelegate struct { +} + +func NewEmptyDelegate() DelegationTarget { + return emptyDelegate{} +} + +func (s emptyDelegate) UnprotectedHandler() http.Handler { + return nil +} +func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry { + return map[string]postStartHookEntry{} +} +func (s emptyDelegate) PreShutdownHooks() map[string]preShutdownHookEntry { + return map[string]preShutdownHookEntry{} +} +func (s emptyDelegate) HealthzChecks() []healthz.HealthChecker { + return []healthz.HealthChecker{} +} +func (s emptyDelegate) ListedPaths() []string { + return []string{} +} +func (s emptyDelegate) NextDelegate() DelegationTarget { + return nil +} +func (s emptyDelegate) PrepareRun() preparedGenericAPIServer { + return preparedGenericAPIServer{nil} +} + +// preparedGenericAPIServer is a private wrapper that enforces a call of PrepareRun() before Run can be invoked. +type preparedGenericAPIServer struct { + *GenericAPIServer +} + +// PrepareRun does post API installation setup steps. It calls recursively the same function of the delegates. +func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { + s.delegationTarget.PrepareRun() + + if s.openAPIConfig != nil { + s.OpenAPIVersionedService, s.StaticOpenAPISpec = routes.OpenAPI{ + Config: s.openAPIConfig, + }.Install(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + } + + s.installHealthz() + s.installLivez() + err := s.addReadyzShutdownCheck(s.readinessStopCh) + if err != nil { + klog.Errorf("Failed to install readyz shutdown check %s", err) + } + s.installReadyz() + + // Register audit backend preShutdownHook. + if s.AuditBackend != nil { + err := s.AddPreShutdownHook("audit-backend", func() error { + s.AuditBackend.Shutdown() + return nil + }) + if err != nil { + klog.Errorf("Failed to add pre-shutdown hook for audit-backend %s", err) + } + } + + return preparedGenericAPIServer{s} +} + +// Run spawns the secure http server. It only returns if stopCh is closed +// or the secure port cannot be listened on initially. +func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { + delayedStopCh := make(chan struct{}) + + go func() { + defer close(delayedStopCh) + + <-stopCh + + // As soon as shutdown is initiated, /readyz should start returning failure. + // This gives the load balancer a window defined by ShutdownDelayDuration to detect that /readyz is red + // and stop sending traffic to this server. + close(s.readinessStopCh) + + time.Sleep(s.ShutdownDelayDuration) + }() + + // close socket after delayed stopCh + stoppedCh, err := s.NonBlockingRun(delayedStopCh) + if err != nil { + return err + } + + <-stopCh + + // run shutdown hooks directly. This includes deregistering from the kubernetes endpoint in case of kube-apiserver. 
+ err = s.RunPreShutdownHooks() + if err != nil { + return err + } + + // wait for the delayed stopCh before closing the handler chain (it rejects everything after Wait has been called). + <-delayedStopCh + // wait for stoppedCh that is closed when the graceful termination (server.Shutdown) is finished. + <-stoppedCh + + // Wait for all requests to finish, which are bounded by the RequestTimeout variable. + s.HandlerChainWaitGroup.Wait() + + return nil +} + +// NonBlockingRun spawns the secure http server. An error is +// returned if the secure port cannot be listened on. +// The returned channel is closed when the (asynchronous) termination is finished. +func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Use an stop channel to allow graceful shutdown without dropping audit events + // after http server shutdown. + auditStopCh := make(chan struct{}) + + // Start the audit backend before any request comes in. This means we must call Backend.Run + // before http server start serving. Otherwise the Backend.ProcessEvents call might block. + if s.AuditBackend != nil { + if err := s.AuditBackend.Run(auditStopCh); err != nil { + return nil, fmt.Errorf("failed to run the audit backend: %v", err) + } + } + + // Use an internal stop channel to allow cleanup of the listeners on error. + internalStopCh := make(chan struct{}) + var stoppedCh <-chan struct{} + if s.SecureServingInfo != nil && s.Handler != nil { + var err error + stoppedCh, err = s.SecureServingInfo.Serve(s.Handler, s.ShutdownTimeout, internalStopCh) + if err != nil { + close(internalStopCh) + close(auditStopCh) + return nil, err + } + } + + // Now that listener have bound successfully, it is the + // responsibility of the caller to close the provided channel to + // ensure cleanup. + go func() { + <-stopCh + close(internalStopCh) + if stoppedCh != nil { + <-stoppedCh + } + s.HandlerChainWaitGroup.Wait() + close(auditStopCh) + }() + + s.RunPostStartHooks(stopCh) + + if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { + klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) + } + + return stoppedCh, nil +} + +// installAPIResources is a private method for installing the REST storage backing each api groupversionresource +func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels openapiproto.Models) error { + var resourceInfos []*storageversion.ResourceInfo + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { + klog.Warningf("Skipping API %v because it has no resources.", groupVersion) + continue + } + + apiGroupVersion := s.getAPIGroupVersion(apiGroupInfo, groupVersion, apiPrefix) + if apiGroupInfo.OptionsExternalVersion != nil { + apiGroupVersion.OptionsExternalVersion = apiGroupInfo.OptionsExternalVersion + } + apiGroupVersion.OpenAPIModels = openAPIModels + + if openAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { + typeConverter, err := fieldmanager.NewTypeConverter(openAPIModels, false) + if err != nil { + return err + } + apiGroupVersion.TypeConverter = typeConverter + } + + apiGroupVersion.MaxRequestBodyBytes = s.maxRequestBodyBytes + + r, err := apiGroupVersion.InstallREST(s.Handler.GoRestfulContainer) + if err != nil { + return fmt.Errorf("unable to setup API %v: %v", apiGroupInfo, err) + } + resourceInfos = append(resourceInfos, r...) 
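For orientation, a minimal sketch (not part of the vendored patch) of how a caller typically drives the PrepareRun/Run lifecycle shown above. It assumes `s` is an already-built *server.GenericAPIServer, and the helper name runServer is hypothetical.

package example

import (
    "os"
    "os/signal"
    "syscall"

    "k8s.io/apiserver/pkg/server"
)

// runServer blocks until SIGTERM/SIGINT and then lets Run execute the graceful
// shutdown sequence described above (readyz turns unhealthy, ShutdownDelayDuration
// elapses, pre-shutdown hooks run, in-flight requests drain).
func runServer(s *server.GenericAPIServer) error {
    stopCh := make(chan struct{})
    sigCh := make(chan os.Signal, 2)
    signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
    go func() {
        <-sigCh
        close(stopCh)
    }()

    // PrepareRun installs healthz/livez/readyz and the OpenAPI endpoint before serving.
    return s.PrepareRun().Run(stopCh)
}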
+ } + + if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) && + utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { + // API installation happens before we start listening on the handlers, + // therefore it is safe to register ResourceInfos here. The handler will block + // write requests until the storage versions of the targeting resources are updated. + s.StorageVersionManager.AddResourceInfo(resourceInfos...) + } + + return nil +} + +func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo *APIGroupInfo) error { + if !s.legacyAPIGroupPrefixes.Has(apiPrefix) { + return fmt.Errorf("%q is not in the allowed legacy API prefixes: %v", apiPrefix, s.legacyAPIGroupPrefixes.List()) + } + + openAPIModels, err := s.getOpenAPIModels(apiPrefix, apiGroupInfo) + if err != nil { + return fmt.Errorf("unable to get openapi models: %v", err) + } + + if err := s.installAPIResources(apiPrefix, apiGroupInfo, openAPIModels); err != nil { + return err + } + + // Install the version handler. + // Add a handler at / to enumerate the supported api versions. + s.Handler.GoRestfulContainer.Add(discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix).WebService()) + + return nil +} + +// Exposes given api groups in the API. +func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) error { + for _, apiGroupInfo := range apiGroupInfos { + // Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned. + // Catching these here places the error much closer to its origin + if len(apiGroupInfo.PrioritizedVersions[0].Group) == 0 { + return fmt.Errorf("cannot register handler with an empty group for %#v", *apiGroupInfo) + } + if len(apiGroupInfo.PrioritizedVersions[0].Version) == 0 { + return fmt.Errorf("cannot register handler with an empty version for %#v", *apiGroupInfo) + } + } + + openAPIModels, err := s.getOpenAPIModels(APIGroupPrefix, apiGroupInfos...) + if err != nil { + return fmt.Errorf("unable to get openapi models: %v", err) + } + + for _, apiGroupInfo := range apiGroupInfos { + if err := s.installAPIResources(APIGroupPrefix, apiGroupInfo, openAPIModels); err != nil { + return fmt.Errorf("unable to install api resources: %v", err) + } + + // setup discovery + // Install the version handler. + // Add a handler at /apis/ to enumerate all versions supported by this group. + apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{} + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + // Check the config to make sure that we elide versions that don't have any resources + if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { + continue + } + apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{ + GroupVersion: groupVersion.String(), + Version: groupVersion.Version, + }) + } + preferredVersionForDiscovery := metav1.GroupVersionForDiscovery{ + GroupVersion: apiGroupInfo.PrioritizedVersions[0].String(), + Version: apiGroupInfo.PrioritizedVersions[0].Version, + } + apiGroup := metav1.APIGroup{ + Name: apiGroupInfo.PrioritizedVersions[0].Group, + Versions: apiVersionsForDiscovery, + PreferredVersion: preferredVersionForDiscovery, + } + + s.DiscoveryGroupManager.AddGroup(apiGroup) + s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup).WebService()) + } + return nil +} + +// Exposes the given api group in the API. 
+func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error { + return s.InstallAPIGroups(apiGroupInfo) +} + +func (s *GenericAPIServer) getAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion, apiPrefix string) *genericapi.APIGroupVersion { + storage := make(map[string]rest.Storage) + for k, v := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] { + storage[strings.ToLower(k)] = v + } + version := s.newAPIGroupVersion(apiGroupInfo, groupVersion) + version.Root = apiPrefix + version.Storage = storage + return version +} + +func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion) *genericapi.APIGroupVersion { + return &genericapi.APIGroupVersion{ + GroupVersion: groupVersion, + MetaGroupVersion: apiGroupInfo.MetaGroupVersion, + + ParameterCodec: apiGroupInfo.ParameterCodec, + Serializer: apiGroupInfo.NegotiatedSerializer, + Creater: apiGroupInfo.Scheme, + Convertor: apiGroupInfo.Scheme, + UnsafeConvertor: runtime.UnsafeObjectConvertor(apiGroupInfo.Scheme), + Defaulter: apiGroupInfo.Scheme, + Typer: apiGroupInfo.Scheme, + Linker: runtime.SelfLinker(meta.NewAccessor()), + + EquivalentResourceRegistry: s.EquivalentResourceRegistry, + + Admit: s.admissionControl, + MinRequestTimeout: s.minRequestTimeout, + Authorizer: s.Authorizer, + } +} + +// NewDefaultAPIGroupInfo returns an APIGroupInfo stubbed with "normal" values +// exposed for easier composition from other packages +func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec runtime.ParameterCodec, codecs serializer.CodecFactory) APIGroupInfo { + return APIGroupInfo{ + PrioritizedVersions: scheme.PrioritizedVersionsForGroup(group), + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{}, + // TODO unhardcode this. It was hardcoded before, but we need to re-evaluate + OptionsExternalVersion: &schema.GroupVersion{Version: "v1"}, + Scheme: scheme, + ParameterCodec: parameterCodec, + NegotiatedSerializer: codecs, + } +} + +// getOpenAPIModels is a private method for getting the OpenAPI models +func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (openapiproto.Models, error) { + if s.openAPIConfig == nil { + return nil, nil + } + pathsToIgnore := openapiutil.NewTrie(s.openAPIConfig.IgnorePrefixes) + resourceNames := make([]string, 0) + for _, apiGroupInfo := range apiGroupInfos { + groupResources, err := getResourceNamesForGroup(apiPrefix, apiGroupInfo, pathsToIgnore) + if err != nil { + return nil, err + } + resourceNames = append(resourceNames, groupResources...) + } + + // Build the openapi definitions for those resources and convert it to proto models + openAPISpec, err := openapibuilder.BuildOpenAPIDefinitionsForResources(s.openAPIConfig, resourceNames...) 
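As a rough illustration of the APIGroupInfo plumbing above (a sketch, not part of the patch): the group name "widgets.example.com" and the helper are hypothetical, the scheme is assumed to already carry the group's registered and prioritized versions, and a real server would populate the storage map with its rest.Storage backends.

package example

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/apiserver/pkg/registry/rest"
    "k8s.io/apiserver/pkg/server"
)

func installWidgets(s *server.GenericAPIServer, scheme *runtime.Scheme) error {
    codecs := serializer.NewCodecFactory(scheme)
    parameterCodec := runtime.NewParameterCodec(scheme)

    // PrioritizedVersions is derived from scheme.PrioritizedVersionsForGroup(group).
    info := server.NewDefaultAPIGroupInfo("widgets.example.com", scheme, parameterCodec, codecs)

    // version -> resource -> storage; an empty map makes InstallAPIGroup skip the version.
    info.VersionedResourcesStorageMap["v1"] = map[string]rest.Storage{
        // "widgets": widgetStorage, // real rest.Storage backends would go here
    }

    return s.InstallAPIGroup(&info)
}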
+ if err != nil { + return nil, err + } + for _, apiGroupInfo := range apiGroupInfos { + apiGroupInfo.StaticOpenAPISpec = openAPISpec + } + return utilopenapi.ToProtoModels(openAPISpec) +} + +// getResourceNamesForGroup is a private method for getting the canonical names for each resource to build in an api group +func getResourceNamesForGroup(apiPrefix string, apiGroupInfo *APIGroupInfo, pathsToIgnore openapiutil.Trie) ([]string, error) { + // Get the canonical names of every resource we need to build in this api group + resourceNames := make([]string, 0) + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + for resource, storage := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] { + path := gpath.Join(apiPrefix, groupVersion.Group, groupVersion.Version, resource) + if !pathsToIgnore.HasPrefix(path) { + kind, err := genericapi.GetResourceKind(groupVersion, storage, apiGroupInfo.Scheme) + if err != nil { + return nil, err + } + sampleObject, err := apiGroupInfo.Scheme.New(kind) + if err != nil { + return nil, err + } + name := openapiutil.GetCanonicalTypeName(sampleObject) + resourceNames = append(resourceNames, name) + } + } + } + + return resourceNames, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/handler.go b/vendor/k8s.io/apiserver/pkg/server/handler.go new file mode 100644 index 000000000..85d8af1ce --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -0,0 +1,190 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "bytes" + "fmt" + "net/http" + rt "runtime" + "sort" + "strings" + + "github.com/emicklei/go-restful" + "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/server/mux" +) + +// APIServerHandlers holds the different http.Handlers used by the API server. +// This includes the full handler chain, the director (which chooses between gorestful and nonGoRestful, +// the gorestful handler (used for the API) which falls through to the nonGoRestful handler on unregistered paths, +// and the nonGoRestful handler (which can contain a fallthrough of its own) +// FullHandlerChain -> Director -> {GoRestfulContainer,NonGoRestfulMux} based on inspection of registered web services +type APIServerHandler struct { + // FullHandlerChain is the one that is eventually served with. It should include the full filter + // chain and then call the Director. + FullHandlerChain http.Handler + // The registered APIs. InstallAPIs uses this. Other servers probably shouldn't access this directly. + GoRestfulContainer *restful.Container + // NonGoRestfulMux is the final HTTP handler in the chain. + // It comes after all filters and the API handling + // This is where other servers can attach handler to various parts of the chain. 
+ NonGoRestfulMux *mux.PathRecorderMux + + // Director is here so that we can properly handle fall through and proxy cases. + // This looks a bit bonkers, but here's what's happening. We need to have /apis handling registered in gorestful in order to have + // swagger generated for compatibility. Doing that with `/apis` as a webservice, means that it forcibly 404s (no defaulting allowed) + // all requests which are not /apis or /apis/. We need those calls to fall through behind goresful for proper delegation. Trying to + // register for a pattern which includes everything behind it doesn't work because gorestful negotiates for verbs and content encoding + // and all those things go crazy when gorestful really just needs to pass through. In addition, openapi enforces unique verb constraints + // which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the + // containing webservice faces all the same problems listed above. + // This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to + // decide if the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to PostGoRestful mux in + // order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think + // we should consider completely removing gorestful. + // Other servers should only use this opaquely to delegate to an API server. + Director http.Handler +} + +// HandlerChainBuilderFn is used to wrap the GoRestfulContainer handler using the provided handler chain. +// It is normally used to apply filtering like authentication and authorization +type HandlerChainBuilderFn func(apiHandler http.Handler) http.Handler + +func NewAPIServerHandler(name string, s runtime.NegotiatedSerializer, handlerChainBuilder HandlerChainBuilderFn, notFoundHandler http.Handler) *APIServerHandler { + nonGoRestfulMux := mux.NewPathRecorderMux(name) + if notFoundHandler != nil { + nonGoRestfulMux.NotFoundHandler(notFoundHandler) + } + + gorestfulContainer := restful.NewContainer() + gorestfulContainer.ServeMux = http.NewServeMux() + gorestfulContainer.Router(restful.CurlyRouter{}) // e.g. for proxy/{kind}/{name}/{*} + gorestfulContainer.RecoverHandler(func(panicReason interface{}, httpWriter http.ResponseWriter) { + logStackOnRecover(s, panicReason, httpWriter) + }) + gorestfulContainer.ServiceErrorHandler(func(serviceErr restful.ServiceError, request *restful.Request, response *restful.Response) { + serviceErrorHandler(s, serviceErr, request, response) + }) + + director := director{ + name: name, + goRestfulContainer: gorestfulContainer, + nonGoRestfulMux: nonGoRestfulMux, + } + + return &APIServerHandler{ + FullHandlerChain: handlerChainBuilder(director), + GoRestfulContainer: gorestfulContainer, + NonGoRestfulMux: nonGoRestfulMux, + Director: director, + } +} + +// ListedPaths returns the paths that should be shown under / +func (a *APIServerHandler) ListedPaths() []string { + var handledPaths []string + // Extract the paths handled using restful.WebService + for _, ws := range a.GoRestfulContainer.RegisteredWebServices() { + handledPaths = append(handledPaths, ws.RootPath()) + } + handledPaths = append(handledPaths, a.NonGoRestfulMux.ListedPaths()...) 
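To make the HandlerChainBuilderFn contract above concrete, a minimal sketch (illustration only): the filter merely stamps a response header, whereas real servers build the full authentication/authorization/timeout chain here; the serializer is assumed to be supplied by the caller, and the names are made up.

package example

import (
    "net/http"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apiserver/pkg/server"
)

func newHandler(codecs runtime.NegotiatedSerializer) *server.APIServerHandler {
    // The chain wraps the director; anything it does not short-circuit falls through
    // to the gorestful container or the NonGoRestfulMux as described above.
    chain := func(apiHandler http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            w.Header().Set("X-Example-Filter", "seen")
            apiHandler.ServeHTTP(w, req)
        })
    }
    // A nil notFoundHandler keeps the mux's default 404 behaviour.
    return server.NewAPIServerHandler("example", codecs, chain, nil)
}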
+ sort.Strings(handledPaths) + + return handledPaths +} + +type director struct { + name string + goRestfulContainer *restful.Container + nonGoRestfulMux *mux.PathRecorderMux +} + +func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { + path := req.URL.Path + + // check to see if our webservices want to claim this path + for _, ws := range d.goRestfulContainer.RegisteredWebServices() { + switch { + case ws.RootPath() == "/apis": + // if we are exactly /apis or /apis/, then we need special handling in loop. + // normally these are passed to the nonGoRestfulMux, but if discovery is enabled, it will go directly. + // We can't rely on a prefix match since /apis matches everything (see the big comment on Director above) + if path == "/apis" || path == "/apis/" { + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + // don't use servemux here because gorestful servemuxes get messed up when removing webservices + // TODO fix gorestful, remove TPRs, or stop using gorestful + d.goRestfulContainer.Dispatch(w, req) + return + } + + case strings.HasPrefix(path, ws.RootPath()): + // ensure an exact match or a path boundary match + if len(path) == len(ws.RootPath()) || path[len(ws.RootPath())] == '/' { + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + // don't use servemux here because gorestful servemuxes get messed up when removing webservices + // TODO fix gorestful, remove TPRs, or stop using gorestful + d.goRestfulContainer.Dispatch(w, req) + return + } + } + } + + // if we didn't find a match, then we just skip gorestful altogether + klog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) + d.nonGoRestfulMux.ServeHTTP(w, req) +} + +//TODO: Unify with RecoverPanics? +func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) { + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) + for i := 2; ; i++ { + _, file, line, ok := rt.Caller(i) + if !ok { + break + } + buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) + } + klog.Errorln(buffer.String()) + + headers := http.Header{} + if ct := w.Header().Get("Content-Type"); len(ct) > 0 { + headers.Set("Accept", ct) + } + responsewriters.ErrorNegotiated(apierrors.NewGenericServerResponse(http.StatusInternalServerError, "", schema.GroupResource{}, "", "", 0, false), s, schema.GroupVersion{}, w, &http.Request{Header: headers}) +} + +func serviceErrorHandler(s runtime.NegotiatedSerializer, serviceErr restful.ServiceError, request *restful.Request, resp *restful.Response) { + responsewriters.ErrorNegotiated( + apierrors.NewGenericServerResponse(serviceErr.Code, "", schema.GroupResource{}, "", serviceErr.Message, 0, false), + s, + schema.GroupVersion{}, + resp, + request.Request, + ) +} + +// ServeHTTP makes it an http.Handler +func (a *APIServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + a.FullHandlerChain.ServeHTTP(w, r) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz.go new file mode 100644 index 000000000..645886949 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/healthz.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/server/healthz" +) + +// AddHealthChecks adds HealthCheck(s) to health endpoints (healthz, livez, readyz) but +// configures the liveness grace period to be zero, which means we expect this health check +// to immediately indicate that the apiserver is unhealthy. +func (s *GenericAPIServer) AddHealthChecks(checks ...healthz.HealthChecker) error { + // we opt for a delay of zero here, because this entrypoint adds generic health checks + // and not health checks which are specifically related to kube-apiserver boot-sequences. + return s.addHealthChecks(0, checks...) +} + +// AddBootSequenceHealthChecks adds health checks to the old healthz endpoint (for backwards compatibility reasons) +// as well as livez and readyz. The livez grace period is defined by the value of the +// command-line flag --livez-grace-period; before the grace period elapses, the livez health checks +// will default to healthy. One may want to set a grace period in order to prevent the kubelet from restarting +// the kube-apiserver due to long-ish boot sequences. Readyz health checks, on the other hand, have no grace period, +// since readyz should fail until boot fully completes. +func (s *GenericAPIServer) AddBootSequenceHealthChecks(checks ...healthz.HealthChecker) error { + return s.addHealthChecks(s.livezGracePeriod, checks...) +} + +// addHealthChecks adds health checks to healthz, livez, and readyz. The delay passed in will set +// a corresponding grace period on livez. +func (s *GenericAPIServer) addHealthChecks(livezGracePeriod time.Duration, checks ...healthz.HealthChecker) error { + s.healthzLock.Lock() + defer s.healthzLock.Unlock() + if s.healthzChecksInstalled { + return fmt.Errorf("unable to add because the healthz endpoint has already been created") + } + s.healthzChecks = append(s.healthzChecks, checks...) + return s.addLivezChecks(livezGracePeriod, checks...) +} + +// addReadyzChecks allows you to add a HealthCheck to readyz. +func (s *GenericAPIServer) addReadyzChecks(checks ...healthz.HealthChecker) error { + s.readyzLock.Lock() + defer s.readyzLock.Unlock() + if s.readyzChecksInstalled { + return fmt.Errorf("unable to add because the readyz endpoint has already been created") + } + s.readyzChecks = append(s.readyzChecks, checks...) + return nil +} + +// addLivezChecks allows you to add a HealthCheck to livez. It will also automatically add a check to readyz, +// since we want to avoid being ready when we are not live. +func (s *GenericAPIServer) addLivezChecks(delay time.Duration, checks ...healthz.HealthChecker) error { + s.livezLock.Lock() + defer s.livezLock.Unlock() + if s.livezChecksInstalled { + return fmt.Errorf("unable to add because the livez endpoint has already been created") + } + for _, check := range checks { + s.livezChecks = append(s.livezChecks, delayedHealthCheck(check, s.livezClock, delay)) + } + return s.addReadyzChecks(checks...) 
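A small sketch of how the registration helpers above are typically used (illustrative only; the check and helper names are made up): a custom healthz.HealthChecker is added before PrepareRun installs the endpoints.

package example

import (
    "fmt"
    "net/http"

    "k8s.io/apiserver/pkg/server"
    "k8s.io/apiserver/pkg/server/healthz"
)

// pingCheck is a toy healthz.HealthChecker; a real check would probe a dependency
// (etcd, a webhook, ...) and return an error while it is unreachable.
type pingCheck struct{ ok func() bool }

func (c pingCheck) Name() string { return "ping" }

func (c pingCheck) Check(_ *http.Request) error {
    if !c.ok() {
        return fmt.Errorf("dependency unreachable")
    }
    return nil
}

// registerChecks must run before PrepareRun, which installs /healthz, /livez and
// /readyz and rejects further registrations.
func registerChecks(s *server.GenericAPIServer, ok func() bool) error {
    var check healthz.HealthChecker = pingCheck{ok: ok}
    // AddHealthChecks wires the check into all three endpoints with a zero livez grace period.
    return s.AddHealthChecks(check)
}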
+} + +// addReadyzShutdownCheck is a convenience function for adding a readyz shutdown check, so +// that we can register that the api-server is no longer ready while we attempt to gracefully +// shutdown. +func (s *GenericAPIServer) addReadyzShutdownCheck(stopCh <-chan struct{}) error { + return s.addReadyzChecks(shutdownCheck{stopCh}) +} + +// installHealthz creates the healthz endpoint for this server +func (s *GenericAPIServer) installHealthz() { + s.healthzLock.Lock() + defer s.healthzLock.Unlock() + s.healthzChecksInstalled = true + healthz.InstallHandler(s.Handler.NonGoRestfulMux, s.healthzChecks...) +} + +// installReadyz creates the readyz endpoint for this server. +func (s *GenericAPIServer) installReadyz() { + s.readyzLock.Lock() + defer s.readyzLock.Unlock() + s.readyzChecksInstalled = true + healthz.InstallReadyzHandler(s.Handler.NonGoRestfulMux, s.readyzChecks...) +} + +// installLivez creates the livez endpoint for this server. +func (s *GenericAPIServer) installLivez() { + s.livezLock.Lock() + defer s.livezLock.Unlock() + s.livezChecksInstalled = true + healthz.InstallLivezHandler(s.Handler.NonGoRestfulMux, s.livezChecks...) +} + +// shutdownCheck fails if the embedded channel is closed. This is intended to allow for graceful shutdown sequences +// for the apiserver. +type shutdownCheck struct { + StopCh <-chan struct{} +} + +func (shutdownCheck) Name() string { + return "shutdown" +} + +func (c shutdownCheck) Check(req *http.Request) error { + select { + case <-c.StopCh: + return fmt.Errorf("process is shutting down") + default: + } + return nil +} + +// delayedHealthCheck wraps a health check which will not fail until the explicitly defined delay has elapsed. This +// is intended for use primarily for livez health checks. +func delayedHealthCheck(check healthz.HealthChecker, clock clock.Clock, delay time.Duration) healthz.HealthChecker { + return delayedLivezCheck{ + check, + clock.Now().Add(delay), + clock, + } +} + +type delayedLivezCheck struct { + check healthz.HealthChecker + startCheck time.Time + clock clock.Clock +} + +func (c delayedLivezCheck) Name() string { + return c.check.Name() +} + +func (c delayedLivezCheck) Check(req *http.Request) error { + if c.clock.Now().After(c.startCheck) { + return c.check.Check(req) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index b2d0007f5..e80b8501e 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -161,6 +161,7 @@ func InstallPathHandler(mux mux, path string, checks ...HealthChecker) { klog.V(5).Infof("Installing health checkers for (%v): %v", path, formatQuoted(checkerNames(checks...)...)) + name := strings.Split(strings.TrimPrefix(path, "/"), "/")[0] mux.Handle(path, metrics.InstrumentHandlerFunc("GET", /* group = */ "", @@ -171,7 +172,7 @@ func InstallPathHandler(mux mux, path string, checks ...HealthChecker) { /* component = */ "", /* deprecated */ false, /* removedRelease */ "", - handleRootHealthz(checks...))) + handleRootHealth(name, checks...))) for _, check := range checks { mux.Handle(fmt.Sprintf("%s/%v", path, check.Name()), adaptCheckToHandler(check.Check)) } @@ -207,8 +208,8 @@ func getExcludedChecks(r *http.Request) sets.String { return sets.NewString() } -// handleRootHealthz returns an http.HandlerFunc that serves the provided checks. 
-func handleRootHealthz(checks ...HealthChecker) http.HandlerFunc { +// handleRootHealth returns an http.HandlerFunc that serves the provided checks. +func handleRootHealth(name string, checks ...HealthChecker) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { excluded := getExcludedChecks(r) // failedVerboseLogOutput is for output to the log. It indicates detailed failed output information for the log. @@ -240,8 +241,8 @@ func handleRootHealthz(checks ...HealthChecker) http.HandlerFunc { } // always be verbose on failure if len(failedChecks) > 0 { - klog.V(2).Infof("healthz check failed: %s\n%v", strings.Join(failedChecks, ","), failedVerboseLogOutput.String()) - http.Error(httplog.Unlogged(r, w), fmt.Sprintf("%shealthz check failed", individualCheckOutput.String()), http.StatusInternalServerError) + klog.V(2).Infof("%s check failed: %s\n%v", strings.Join(failedChecks, ","), name, failedVerboseLogOutput.String()) + http.Error(httplog.Unlogged(r, w), fmt.Sprintf("%s%s check failed", individualCheckOutput.String(), name), http.StatusInternalServerError) return } @@ -253,7 +254,7 @@ func handleRootHealthz(checks ...HealthChecker) http.HandlerFunc { } individualCheckOutput.WriteTo(w) - fmt.Fprint(w, "healthz check passed\n") + fmt.Fprintf(w, "%s check passed\n", name) }) } diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go new file mode 100644 index 000000000..999ad3600 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -0,0 +1,244 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "errors" + "fmt" + "net/http" + "runtime/debug" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/healthz" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +// PostStartHookFunc is a function that is called after the server has started. +// It must properly handle cases like: +// 1. asynchronous start in multiple API server processes +// 2. conflicts between the different processes all trying to perform the same action +// 3. partially complete work (API server crashes while running your hook) +// 4. API server access **BEFORE** your hook has completed +// Think of it like a mini-controller that is super privileged and gets to run in-process +// If you use this feature, tag @deads2k on github who has promised to review code for anyone's PostStartHook +// until it becomes easier to use. +type PostStartHookFunc func(context PostStartHookContext) error + +// PreShutdownHookFunc is a function that can be added to the shutdown logic. 
+type PreShutdownHookFunc func() error + +// PostStartHookContext provides information about this API server to a PostStartHookFunc +type PostStartHookContext struct { + // LoopbackClientConfig is a config for a privileged loopback connection to the API server + LoopbackClientConfig *restclient.Config + // StopCh is the channel that will be closed when the server stops + StopCh <-chan struct{} +} + +// PostStartHookProvider is an interface in addition to provide a post start hook for the api server +type PostStartHookProvider interface { + PostStartHook() (string, PostStartHookFunc, error) +} + +type postStartHookEntry struct { + hook PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. + originatingStack string + + // done will be closed when the postHook is finished + done chan struct{} +} + +type PostStartHookConfigEntry struct { + hook PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. + originatingStack string +} + +type preShutdownHookEntry struct { + hook PreShutdownHookFunc +} + +// AddPostStartHook allows you to add a PostStartHook. +func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return fmt.Errorf("hook func may not be nil: %q", name) + } + if s.disabledPostStartHooks.Has(name) { + klog.V(1).Infof("skipping %q because it was explicitly disabled", name) + return nil + } + + s.postStartHookLock.Lock() + defer s.postStartHookLock.Unlock() + + if s.postStartHooksCalled { + return fmt.Errorf("unable to add %q because PostStartHooks have already been called", name) + } + if postStartHook, exists := s.postStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack) + } + + // done is closed when the poststarthook is finished. This is used by the health check to be able to indicate + // that the poststarthook is finished + done := make(chan struct{}) + if err := s.AddBootSequenceHealthChecks(postStartHookHealthz{name: "poststarthook/" + name, done: done}); err != nil { + return err + } + s.postStartHooks[name] = postStartHookEntry{hook: hook, originatingStack: string(debug.Stack()), done: done} + + return nil +} + +// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure +func (s *GenericAPIServer) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { + if err := s.AddPostStartHook(name, hook); err != nil { + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) + } +} + +// AddPreShutdownHook allows you to add a PreShutdownHook. 
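For reference, a sketch of registering the hooks described above (the hook names and helper are hypothetical). As noted in AddPostStartHook, the post-start hook also gains a matching "poststarthook/..." readyz check that stays failing until the hook returns.

package example

import (
    "k8s.io/apiserver/pkg/server"
    "k8s.io/klog/v2"
)

// addHooks registers one post-start and one pre-shutdown hook on an already-built server.
func addHooks(s *server.GenericAPIServer) error {
    if err := s.AddPostStartHook("start-example-controller", func(ctx server.PostStartHookContext) error {
        // ctx.LoopbackClientConfig can be used to build a privileged loopback client;
        // ctx.StopCh is closed when the server begins shutting down.
        go func() {
            <-ctx.StopCh
            klog.Info("example controller stopping")
        }()
        return nil
    }); err != nil {
        return err
    }

    return s.AddPreShutdownHook("flush-example-state", func() error {
        klog.Info("flushing example state before shutdown")
        return nil
    })
}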
+func (s *GenericAPIServer) AddPreShutdownHook(name string, hook PreShutdownHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return nil + } + + s.preShutdownHookLock.Lock() + defer s.preShutdownHookLock.Unlock() + + if s.preShutdownHooksCalled { + return fmt.Errorf("unable to add %q because PreShutdownHooks have already been called", name) + } + if _, exists := s.preShutdownHooks[name]; exists { + return fmt.Errorf("unable to add %q because it is already registered", name) + } + + s.preShutdownHooks[name] = preShutdownHookEntry{hook: hook} + + return nil +} + +// AddPreShutdownHookOrDie allows you to add a PostStartHook, but dies on failure +func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdownHookFunc) { + if err := s.AddPreShutdownHook(name, hook); err != nil { + klog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) + } +} + +// RunPostStartHooks runs the PostStartHooks for the server +func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) { + s.postStartHookLock.Lock() + defer s.postStartHookLock.Unlock() + s.postStartHooksCalled = true + + context := PostStartHookContext{ + LoopbackClientConfig: s.LoopbackClientConfig, + StopCh: stopCh, + } + + for hookName, hookEntry := range s.postStartHooks { + go runPostStartHook(hookName, hookEntry, context) + } +} + +// RunPreShutdownHooks runs the PreShutdownHooks for the server +func (s *GenericAPIServer) RunPreShutdownHooks() error { + var errorList []error + + s.preShutdownHookLock.Lock() + defer s.preShutdownHookLock.Unlock() + s.preShutdownHooksCalled = true + + for hookName, hookEntry := range s.preShutdownHooks { + if err := runPreShutdownHook(hookName, hookEntry); err != nil { + errorList = append(errorList, err) + } + } + return utilerrors.NewAggregate(errorList) +} + +// isPostStartHookRegistered checks whether a given PostStartHook is registered +func (s *GenericAPIServer) isPostStartHookRegistered(name string) bool { + s.postStartHookLock.Lock() + defer s.postStartHookLock.Unlock() + _, exists := s.postStartHooks[name] + return exists +} + +func runPostStartHook(name string, entry postStartHookEntry, context PostStartHookContext) { + var err error + func() { + // don't let the hook *accidentally* panic and kill the server + defer utilruntime.HandleCrash() + err = entry.hook(context) + }() + // if the hook intentionally wants to kill server, let it. + if err != nil { + klog.Fatalf("PostStartHook %q failed: %v", name, err) + } + close(entry.done) +} + +func runPreShutdownHook(name string, entry preShutdownHookEntry) error { + var err error + func() { + // don't let the hook *accidentally* panic and kill the server + defer utilruntime.HandleCrash() + err = entry.hook() + }() + if err != nil { + return fmt.Errorf("PreShutdownHook %q failed: %v", name, err) + } + return nil +} + +// postStartHookHealthz implements a healthz check for poststarthooks. It will return a "hookNotFinished" +// error until the poststarthook is finished. 
+type postStartHookHealthz struct { + name string + + // done will be closed when the postStartHook is finished + done chan struct{} +} + +var _ healthz.HealthChecker = postStartHookHealthz{} + +func (h postStartHookHealthz) Name() string { + return h.name +} + +var hookNotFinished = errors.New("not finished") + +func (h postStartHookHealthz) Check(req *http.Request) error { + select { + case <-h.done: + return nil + default: + return hookNotFinished + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS b/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS new file mode 100644 index 000000000..4da107c8c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/mux/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/doc.go b/vendor/k8s.io/apiserver/pkg/server/mux/doc.go new file mode 100644 index 000000000..178aa9fe6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/mux/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mux contains abstractions for http multiplexing of APIs. +package mux // import "k8s.io/apiserver/pkg/server/mux" diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go new file mode 100644 index 000000000..cb4941f02 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go @@ -0,0 +1,278 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mux + +import ( + "fmt" + "net/http" + "runtime/debug" + "sort" + "strings" + "sync" + "sync/atomic" + + "k8s.io/klog/v2" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// PathRecorderMux wraps a mux object and records the registered exposedPaths. +type PathRecorderMux struct { + // name is used for logging so you can trace requests through + name string + + lock sync.Mutex + notFoundHandler http.Handler + pathToHandler map[string]http.Handler + prefixToHandler map[string]http.Handler + + // mux stores a pathHandler and is used to handle the actual serving. + // Turns out, we want to accept trailing slashes, BUT we don't care about handling + // everything under them. 
This does exactly matches only unless its explicitly requested to + // do something different + mux atomic.Value + + // exposedPaths is the list of paths that should be shown at / + exposedPaths []string + + // pathStacks holds the stacks of all registered paths. This allows us to show a more helpful message + // before the "http: multiple registrations for %s" panic. + pathStacks map[string]string +} + +// pathHandler is an http.Handler that will satisfy requests first by exact match, then by prefix, +// then by notFoundHandler +type pathHandler struct { + // muxName is used for logging so you can trace requests through + muxName string + + // pathToHandler is a map of exactly matching request to its handler + pathToHandler map[string]http.Handler + + // this has to be sorted by most slashes then by length + prefixHandlers []prefixHandler + + // notFoundHandler is the handler to use for satisfying requests with no other match + notFoundHandler http.Handler +} + +// prefixHandler holds the prefix it should match and the handler to use +type prefixHandler struct { + // prefix is the prefix to test for a request match + prefix string + // handler is used to satisfy matching requests + handler http.Handler +} + +// NewPathRecorderMux creates a new PathRecorderMux +func NewPathRecorderMux(name string) *PathRecorderMux { + ret := &PathRecorderMux{ + name: name, + pathToHandler: map[string]http.Handler{}, + prefixToHandler: map[string]http.Handler{}, + mux: atomic.Value{}, + exposedPaths: []string{}, + pathStacks: map[string]string{}, + } + + ret.mux.Store(&pathHandler{notFoundHandler: http.NotFoundHandler()}) + return ret +} + +// ListedPaths returns the registered handler exposedPaths. +func (m *PathRecorderMux) ListedPaths() []string { + handledPaths := append([]string{}, m.exposedPaths...) + sort.Strings(handledPaths) + + return handledPaths +} + +func (m *PathRecorderMux) trackCallers(path string) { + if existingStack, ok := m.pathStacks[path]; ok { + utilruntime.HandleError(fmt.Errorf("registered %q from %v", path, existingStack)) + } + m.pathStacks[path] = string(debug.Stack()) +} + +// refreshMuxLocked creates a new mux and must be called while locked. Otherwise the view of handlers may +// not be consistent +func (m *PathRecorderMux) refreshMuxLocked() { + newMux := &pathHandler{ + muxName: m.name, + pathToHandler: map[string]http.Handler{}, + prefixHandlers: []prefixHandler{}, + notFoundHandler: http.NotFoundHandler(), + } + if m.notFoundHandler != nil { + newMux.notFoundHandler = m.notFoundHandler + } + for path, handler := range m.pathToHandler { + newMux.pathToHandler[path] = handler + } + + keys := sets.StringKeySet(m.prefixToHandler).List() + sort.Sort(sort.Reverse(byPrefixPriority(keys))) + for _, prefix := range keys { + newMux.prefixHandlers = append(newMux.prefixHandlers, prefixHandler{ + prefix: prefix, + handler: m.prefixToHandler[prefix], + }) + } + + m.mux.Store(newMux) +} + +// NotFoundHandler sets the handler to use if there's no match for a give path +func (m *PathRecorderMux) NotFoundHandler(notFoundHandler http.Handler) { + m.lock.Lock() + defer m.lock.Unlock() + + m.notFoundHandler = notFoundHandler + + m.refreshMuxLocked() +} + +// Unregister removes a path from the mux. 
+func (m *PathRecorderMux) Unregister(path string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.pathToHandler, path) + delete(m.prefixToHandler, path) + delete(m.pathStacks, path) + for i := range m.exposedPaths { + if m.exposedPaths[i] == path { + m.exposedPaths = append(m.exposedPaths[:i], m.exposedPaths[i+1:]...) + break + } + } + + m.refreshMuxLocked() +} + +// Handle registers the handler for the given pattern. +// If a handler already exists for pattern, Handle panics. +func (m *PathRecorderMux) Handle(path string, handler http.Handler) { + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.exposedPaths = append(m.exposedPaths, path) + m.pathToHandler[path] = handler + m.refreshMuxLocked() +} + +// HandleFunc registers the handler function for the given pattern. +// If a handler already exists for pattern, Handle panics. +func (m *PathRecorderMux) HandleFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + m.Handle(path, http.HandlerFunc(handler)) +} + +// UnlistedHandle registers the handler for the given pattern, but doesn't list it. +// If a handler already exists for pattern, Handle panics. +func (m *PathRecorderMux) UnlistedHandle(path string, handler http.Handler) { + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.pathToHandler[path] = handler + m.refreshMuxLocked() +} + +// UnlistedHandleFunc registers the handler function for the given pattern, but doesn't list it. +// If a handler already exists for pattern, Handle panics. +func (m *PathRecorderMux) UnlistedHandleFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + m.UnlistedHandle(path, http.HandlerFunc(handler)) +} + +// HandlePrefix is like Handle, but matches for anything under the path. Like a standard golang trailing slash. +func (m *PathRecorderMux) HandlePrefix(path string, handler http.Handler) { + if !strings.HasSuffix(path, "/") { + panic(fmt.Sprintf("%q must end in a trailing slash", path)) + } + + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.exposedPaths = append(m.exposedPaths, path) + m.prefixToHandler[path] = handler + m.refreshMuxLocked() +} + +// UnlistedHandlePrefix is like UnlistedHandle, but matches for anything under the path. Like a standard golang trailing slash. 
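A self-contained sketch (illustration only; the paths and helper name are made up) of the exact-match, prefix-match and unlisted behaviours implemented above.

package example

import (
    "net/http"

    "k8s.io/apiserver/pkg/server/mux"
)

// newDebugMux matches /version exactly, routes everything under /debug/ to one
// handler, and serves /internal/metrics without listing it in ListedPaths().
func newDebugMux() *mux.PathRecorderMux {
    m := mux.NewPathRecorderMux("example")

    m.HandleFunc("/version", func(w http.ResponseWriter, _ *http.Request) {
        w.Write([]byte("v0.0.0\n"))
    })

    // HandlePrefix requires a trailing slash and matches anything below the prefix.
    m.HandlePrefix("/debug/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("debug: " + r.URL.Path + "\n"))
    }))

    m.UnlistedHandleFunc("/internal/metrics", func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    })

    return m
}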
+func (m *PathRecorderMux) UnlistedHandlePrefix(path string, handler http.Handler) { + if !strings.HasSuffix(path, "/") { + panic(fmt.Sprintf("%q must end in a trailing slash", path)) + } + + m.lock.Lock() + defer m.lock.Unlock() + m.trackCallers(path) + + m.prefixToHandler[path] = handler + m.refreshMuxLocked() +} + +// ServeHTTP makes it an http.Handler +func (m *PathRecorderMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + m.mux.Load().(*pathHandler).ServeHTTP(w, r) +} + +// ServeHTTP makes it an http.Handler +func (h *pathHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if exactHandler, ok := h.pathToHandler[r.URL.Path]; ok { + klog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) + exactHandler.ServeHTTP(w, r) + return + } + + for _, prefixHandler := range h.prefixHandlers { + if strings.HasPrefix(r.URL.Path, prefixHandler.prefix) { + klog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) + prefixHandler.handler.ServeHTTP(w, r) + return + } + } + + klog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) + h.notFoundHandler.ServeHTTP(w, r) +} + +// byPrefixPriority sorts url prefixes by the order in which they should be tested by the mux +// this has to be sorted by most slashes then by length so that we can iterate straight +// through to match the "best" one first. +type byPrefixPriority []string + +func (s byPrefixPriority) Len() int { return len(s) } +func (s byPrefixPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPrefixPriority) Less(i, j int) bool { + lhsNumParts := strings.Count(s[i], "/") + rhsNumParts := strings.Count(s[j], "/") + if lhsNumParts != rhsNumParts { + return lhsNumParts < rhsNumParts + } + + lhsLen := len(s[i]) + rhsLen := len(s[j]) + if lhsLen != rhsLen { + return lhsLen < rhsLen + } + + return strings.Compare(s[i], s[j]) < 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS new file mode 100644 index 000000000..0e84109d7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- nikhiljindal +- sttts +- jlowdermilk +- soltysh +- dims +- cjcullen +- ping035627 +- xiangpengzhao +- enj diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go new file mode 100644 index 000000000..765e2ad2b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -0,0 +1,234 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "strings" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" + apiserverapi "k8s.io/apiserver/pkg/apis/apiserver" + apiserverapiv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" + apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/component-base/featuregate" +) + +var configScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(apiserverapi.AddToScheme(configScheme)) + utilruntime.Must(apiserverapiv1alpha1.AddToScheme(configScheme)) + utilruntime.Must(apiserverapiv1.AddToScheme(configScheme)) +} + +// AdmissionOptions holds the admission options +type AdmissionOptions struct { + // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default + RecommendedPluginOrder []string + // DefaultOffPlugins is a set of plugin names that is disabled by default + DefaultOffPlugins sets.String + + // EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`. + EnablePlugins []string + // DisablePlugins indicates plugins to be disabled passed through `--disable-admission-plugins`. + DisablePlugins []string + // ConfigFile is the file path with admission control configuration. + ConfigFile string + // Plugins contains all registered plugins. + Plugins *admission.Plugins + // Decorators is a list of admission decorator to wrap around the admission plugins + Decorators admission.Decorators +} + +// NewAdmissionOptions creates a new instance of AdmissionOptions +// Note: +// In addition it calls RegisterAllAdmissionPlugins to register +// all generic admission plugins. +// +// Provides the list of RecommendedPluginOrder that holds sane values +// that can be used by servers that don't care about admission chain. +// Servers that do care can overwrite/append that field after creation. +func NewAdmissionOptions() *AdmissionOptions { + options := &AdmissionOptions{ + Plugins: admission.NewPlugins(), + Decorators: admission.Decorators{admission.DecoratorFunc(admissionmetrics.WithControllerMetrics)}, + // This list is mix of mutating admission plugins and validating + // admission plugins. The apiserver always runs the validating ones + // after all the mutating ones, so their relative order in this list + // doesn't matter. + RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, + DefaultOffPlugins: sets.NewString(), + } + server.RegisterAllAdmissionPlugins(options.Plugins) + return options +} + +// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet +func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { + if a == nil { + return + } + + fs.StringSliceVar(&a.EnablePlugins, "enable-admission-plugins", a.EnablePlugins, ""+ + "admission plugins that should be enabled in addition to default enabled ones ("+ + strings.Join(a.defaultEnabledPluginNames(), ", ")+"). 
"+ + "Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+ + "The order of plugins in this flag does not matter.") + fs.StringSliceVar(&a.DisablePlugins, "disable-admission-plugins", a.DisablePlugins, ""+ + "admission plugins that should be disabled although they are in the default enabled plugins list ("+ + strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+ + "Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+ + "The order of plugins in this flag does not matter.") + fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile, + "File with admission control configuration.") +} + +// ApplyTo adds the admission chain to the server configuration. +// In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values. +// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers +// note this method uses: +// genericconfig.Authorizer +func (a *AdmissionOptions) ApplyTo( + c *server.Config, + informers informers.SharedInformerFactory, + kubeAPIServerClientConfig *rest.Config, + features featuregate.FeatureGate, + pluginInitializers ...admission.PluginInitializer, +) error { + if a == nil { + return nil + } + + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if informers == nil { + return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil") + } + + pluginNames := a.enabledPluginNames() + + pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, configScheme) + if err != nil { + return fmt.Errorf("failed to read plugin config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) + if err != nil { + return err + } + genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, features) + initializersChain := admission.PluginInitializers{} + pluginInitializers = append(pluginInitializers, genericInitializer) + initializersChain = append(initializersChain, pluginInitializers...) + + admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, a.Decorators) + if err != nil { + return err + } + + c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain) + return nil +} + +// Validate verifies flags passed to AdmissionOptions. +func (a *AdmissionOptions) Validate() []error { + if a == nil { + return nil + } + + errs := []error{} + + registeredPlugins := sets.NewString(a.Plugins.Registered()...) + for _, name := range a.EnablePlugins { + if !registeredPlugins.Has(name) { + errs = append(errs, fmt.Errorf("enable-admission-plugins plugin %q is unknown", name)) + } + } + + for _, name := range a.DisablePlugins { + if !registeredPlugins.Has(name) { + errs = append(errs, fmt.Errorf("disable-admission-plugins plugin %q is unknown", name)) + } + } + + enablePlugins := sets.NewString(a.EnablePlugins...) + disablePlugins := sets.NewString(a.DisablePlugins...) + if len(enablePlugins.Intersection(disablePlugins).List()) > 0 { + errs = append(errs, fmt.Errorf("%v in enable-admission-plugins and disable-admission-plugins "+ + "overlapped", enablePlugins.Intersection(disablePlugins).List())) + } + + // Verify RecommendedPluginOrder. + recommendPlugins := sets.NewString(a.RecommendedPluginOrder...) 
+ intersections := registeredPlugins.Intersection(recommendPlugins) + if !intersections.Equal(recommendPlugins) { + // Developer error, this should never run in. + errs = append(errs, fmt.Errorf("plugins %v in RecommendedPluginOrder are not registered", + recommendPlugins.Difference(intersections).List())) + } + if !intersections.Equal(registeredPlugins) { + // Developer error, this should never run in. + errs = append(errs, fmt.Errorf("plugins %v registered are not in RecommendedPluginOrder", + registeredPlugins.Difference(intersections).List())) + } + + return errs +} + +// enabledPluginNames makes use of RecommendedPluginOrder, DefaultOffPlugins, +// EnablePlugins, DisablePlugins fields +// to prepare a list of ordered plugin names that are enabled. +func (a *AdmissionOptions) enabledPluginNames() []string { + allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...) + disabledPlugins := sets.NewString(allOffPlugins...) + enabledPlugins := sets.NewString(a.EnablePlugins...) + disabledPlugins = disabledPlugins.Difference(enabledPlugins) + + orderedPlugins := []string{} + for _, plugin := range a.RecommendedPluginOrder { + if !disabledPlugins.Has(plugin) { + orderedPlugins = append(orderedPlugins, plugin) + } + } + + return orderedPlugins +} + +//Return names of plugins which are enabled by default +func (a *AdmissionOptions) defaultEnabledPluginNames() []string { + defaultOnPluginNames := []string{} + for _, pluginName := range a.RecommendedPluginOrder { + if !a.DefaultOffPlugins.Has(pluginName) { + defaultOnPluginNames = append(defaultOnPluginNames, pluginName) + } + } + + return defaultOnPluginNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go new file mode 100644 index 000000000..794e89ded --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -0,0 +1,115 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "strings" + + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/resourceconfig" + serverstore "k8s.io/apiserver/pkg/server/storage" + cliflag "k8s.io/component-base/cli/flag" +) + +// APIEnablementOptions contains the options for which resources to turn on and off. +// Given small aggregated API servers, this option isn't required for "normal" API servers +type APIEnablementOptions struct { + RuntimeConfig cliflag.ConfigurationMap +} + +func NewAPIEnablementOptions() *APIEnablementOptions { + return &APIEnablementOptions{ + RuntimeConfig: make(cliflag.ConfigurationMap), + } +} + +// AddFlags adds flags for a specific APIServer to the specified FlagSet +func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { + fs.Var(&s.RuntimeConfig, "runtime-config", ""+ + "A set of key=value pairs that enable or disable built-in APIs. 
Supported options are:\n"+ + "v1=true|false for the core API group\n"+ + "/=true|false for a specific API group and version (e.g. apps/v1=true)\n"+ + "api/all=true|false controls all API versions\n"+ + "api/ga=true|false controls all API versions of the form v[0-9]+\n"+ + "api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+\n"+ + "api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+\n"+ + "api/legacy is deprecated, and will be removed in a future version") +} + +// Validate validates RuntimeConfig with a list of registries. +// Usually this list only has one element, the apiserver registry of the process. +// But in the advanced (and usually not recommended) case of delegated apiservers there can be more. +// Validate will filter out the known groups of each registry. +// If anything is left over after that, an error is returned. +func (s *APIEnablementOptions) Validate(registries ...GroupRegisty) []error { + if s == nil { + return nil + } + + errors := []error{} + if s.RuntimeConfig[resourceconfig.APIAll] == "false" && len(s.RuntimeConfig) == 1 { + // Do not allow only set api/all=false, in such case apiserver startup has no meaning. + return append(errors, fmt.Errorf("invalid key with only %v=false", resourceconfig.APIAll)) + } + + groups, err := resourceconfig.ParseGroups(s.RuntimeConfig) + if err != nil { + return append(errors, err) + } + + for _, registry := range registries { + // filter out known groups + groups = unknownGroups(groups, registry) + } + if len(groups) != 0 { + errors = append(errors, fmt.Errorf("unknown api groups %s", strings.Join(groups, ","))) + } + + return errors +} + +// ApplyTo override MergedResourceConfig with defaults and registry +func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error { + + if s == nil { + return nil + } + + mergedResourceConfig, err := resourceconfig.MergeAPIResourceConfigs(defaultResourceConfig, s.RuntimeConfig, registry) + c.MergedResourceConfig = mergedResourceConfig + + return err +} + +func unknownGroups(groups []string, registry GroupRegisty) []string { + unknownGroups := []string{} + for _, group := range groups { + if !registry.IsGroupRegistered(group) { + unknownGroups = append(unknownGroups, group) + } + } + return unknownGroups +} + +// GroupRegisty provides a method to check whether given group is registered. +type GroupRegisty interface { + // IsRegistered returns true if given group is registered. + IsGroupRegistered(group string) bool +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go new file mode 100644 index 000000000..c3a709dcf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go @@ -0,0 +1,607 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/spf13/pflag" + "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" + auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/util/webhook" + pluginbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered" + pluginlog "k8s.io/apiserver/plugin/pkg/audit/log" + plugintruncate "k8s.io/apiserver/plugin/pkg/audit/truncate" + pluginwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook" +) + +const ( + // Default configuration values for ModeBatch. + defaultBatchBufferSize = 10000 // Buffer up to 10000 events before starting discarding. + // These batch parameters are only used by the webhook backend. + defaultBatchMaxSize = 400 // Only send up to 400 events at a time. + defaultBatchMaxWait = 30 * time.Second // Send events at least twice a minute. + defaultBatchThrottleQPS = 10 // Limit the send rate by 10 QPS. + defaultBatchThrottleBurst = 15 // Allow up to 15 QPS burst. +) + +func appendBackend(existing, newBackend audit.Backend) audit.Backend { + if existing == nil { + return newBackend + } + if newBackend == nil { + return existing + } + return audit.Union(existing, newBackend) +} + +type AuditOptions struct { + // Policy configuration file for filtering audit events that are captured. + // If unspecified, a default is provided. + PolicyFile string + + // Plugin options + LogOptions AuditLogOptions + WebhookOptions AuditWebhookOptions +} + +const ( + // ModeBatch indicates that the audit backend should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + ModeBatch = "batch" + // ModeBlocking causes the audit backend to block on every attempt to process + // a set of events. This causes requests to the API server to wait for the + // flush before sending a response. + ModeBlocking = "blocking" + // ModeBlockingStrict is the same as ModeBlocking, except when there is + // a failure during audit logging at RequestReceived stage, the whole + // request to apiserver will fail. + ModeBlockingStrict = "blocking-strict" +) + +// AllowedModes is the modes known for audit backends. +var AllowedModes = []string{ + ModeBatch, + ModeBlocking, + ModeBlockingStrict, +} + +type AuditBatchOptions struct { + // Should the backend asynchronous batch events to the webhook backend or + // should the backend block responses? + // + // Defaults to asynchronous batch events. + Mode string + // Configuration for batching backend. Only used in batch mode. + BatchConfig pluginbuffered.BatchConfig +} + +type AuditTruncateOptions struct { + // Whether truncating is enabled or not. + Enabled bool + + // Truncating configuration. + TruncateConfig plugintruncate.Config +} + +// AuditLogOptions determines the output of the structured audit log by default. +type AuditLogOptions struct { + Path string + MaxAge int + MaxBackups int + MaxSize int + Format string + Compress bool + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. 
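+	// NewAuditOptions defaults this to "audit.k8s.io/v1".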
+ GroupVersionString string +} + +// AuditWebhookOptions control the webhook configuration for audit events. +type AuditWebhookOptions struct { + ConfigFile string + InitialBackoff time.Duration + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. + GroupVersionString string +} + +// AuditDynamicOptions control the configuration of dynamic backends for audit events +type AuditDynamicOptions struct { + // Enabled tells whether the dynamic audit capability is enabled. + Enabled bool + + // Configuration for batching backend. This is currently only used as an override + // for integration tests + BatchConfig *pluginbuffered.BatchConfig +} + +func NewAuditOptions() *AuditOptions { + return &AuditOptions{ + WebhookOptions: AuditWebhookOptions{ + InitialBackoff: pluginwebhook.DefaultInitialBackoffDelay, + BatchOptions: AuditBatchOptions{ + Mode: ModeBatch, + BatchConfig: defaultWebhookBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + LogOptions: AuditLogOptions{ + Format: pluginlog.FormatJson, + BatchOptions: AuditBatchOptions{ + Mode: ModeBlocking, + BatchConfig: defaultLogBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + } +} + +func NewAuditTruncateOptions() AuditTruncateOptions { + return AuditTruncateOptions{ + Enabled: false, + TruncateConfig: plugintruncate.Config{ + MaxBatchSize: 10 * 1024 * 1024, // 10MB + MaxEventSize: 100 * 1024, // 100KB + }, + } +} + +// Validate checks invalid config combination +func (o *AuditOptions) Validate() []error { + if o == nil { + return nil + } + + var allErrors []error + allErrors = append(allErrors, o.LogOptions.Validate()...) + allErrors = append(allErrors, o.WebhookOptions.Validate()...) + + return allErrors +} + +func validateBackendMode(pluginName string, mode string) error { + for _, m := range AllowedModes { + if m == mode { + return nil + } + } + return fmt.Errorf("invalid audit %s mode %s, allowed modes are %q", pluginName, mode, strings.Join(AllowedModes, ",")) +} + +func validateBackendBatchOptions(pluginName string, options AuditBatchOptions) error { + if err := validateBackendMode(pluginName, options.Mode); err != nil { + return err + } + if options.Mode != ModeBatch { + // Don't validate the unused options. 
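+		// The buffer, batch-size and throttle settings checked below only apply in batch mode.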
+ return nil + } + config := options.BatchConfig + if config.BufferSize <= 0 { + return fmt.Errorf("invalid audit batch %s buffer size %v, must be a positive number", pluginName, config.BufferSize) + } + if config.MaxBatchSize <= 0 { + return fmt.Errorf("invalid audit batch %s max batch size %v, must be a positive number", pluginName, config.MaxBatchSize) + } + if config.ThrottleEnable { + if config.ThrottleQPS <= 0 { + return fmt.Errorf("invalid audit batch %s throttle QPS %v, must be a positive number", pluginName, config.ThrottleQPS) + } + if config.ThrottleBurst <= 0 { + return fmt.Errorf("invalid audit batch %s throttle burst %v, must be a positive number", pluginName, config.ThrottleBurst) + } + } + return nil +} + +var knownGroupVersions = []schema.GroupVersion{ + auditv1alpha1.SchemeGroupVersion, + auditv1beta1.SchemeGroupVersion, + auditv1.SchemeGroupVersion, +} + +func validateGroupVersionString(groupVersion string) error { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return err + } + if !knownGroupVersion(gv) { + return fmt.Errorf("invalid group version, allowed versions are %q", knownGroupVersions) + } + return nil +} + +func knownGroupVersion(gv schema.GroupVersion) bool { + for _, knownGv := range knownGroupVersions { + if gv == knownGv { + return true + } + } + return false +} + +func (o *AuditOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.PolicyFile, "audit-policy-file", o.PolicyFile, + "Path to the file that defines the audit policy configuration.") + + o.LogOptions.AddFlags(fs) + o.LogOptions.BatchOptions.AddFlags(pluginlog.PluginName, fs) + o.LogOptions.TruncateOptions.AddFlags(pluginlog.PluginName, fs) + o.WebhookOptions.AddFlags(fs) + o.WebhookOptions.BatchOptions.AddFlags(pluginwebhook.PluginName, fs) + o.WebhookOptions.TruncateOptions.AddFlags(pluginwebhook.PluginName, fs) +} + +func (o *AuditOptions) ApplyTo( + c *server.Config, +) error { + if o == nil { + return nil + } + if c == nil { + return fmt.Errorf("server config must be non-nil") + } + + // 1. Build policy checker + checker, err := o.newPolicyChecker() + if err != nil { + return err + } + + // 2. Build log backend + var logBackend audit.Backend + if w := o.LogOptions.getWriter(); w != nil { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend") + } else { + logBackend = o.LogOptions.newBackend(w) + } + } + + // 3. Build webhook backend + var webhookBackend audit.Backend + if o.WebhookOptions.enabled() { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend") + } else { + if c.EgressSelector != nil { + egressDialer, err := c.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) + if err != nil { + return err + } + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(egressDialer) + } else { + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(nil) + } + if err != nil { + return err + } + } + } + + groupVersion, err := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString) + if err != nil { + return err + } + + // 4. Apply dynamic options. + var dynamicBackend audit.Backend + if webhookBackend != nil { + // if only webhook is enabled wrap it in the truncate options + dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion) + } + + // 5. Set the policy checker + c.AuditPolicyChecker = checker + + // 6. 
Join the log backend with the webhooks + c.AuditBackend = appendBackend(logBackend, dynamicBackend) + + if c.AuditBackend != nil { + klog.V(2).Infof("Using audit backend: %s", c.AuditBackend) + } + return nil +} + +func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { + if o.PolicyFile == "" { + return nil, nil + } + + p, err := policy.LoadPolicyFromFile(o.PolicyFile) + if err != nil { + return nil, fmt.Errorf("loading audit policy file: %v", err) + } + return policy.NewChecker(p), nil +} + +func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.StringVar(&o.Mode, fmt.Sprintf("audit-%s-mode", pluginName), o.Mode, + "Strategy for sending audit events. Blocking indicates sending events should block"+ + " server responses. Batch causes the backend to buffer and write events"+ + " asynchronously. Known modes are "+strings.Join(AllowedModes, ",")+".") + fs.IntVar(&o.BatchConfig.BufferSize, fmt.Sprintf("audit-%s-batch-buffer-size", pluginName), + o.BatchConfig.BufferSize, "The size of the buffer to store events before "+ + "batching and writing. Only used in batch mode.") + fs.IntVar(&o.BatchConfig.MaxBatchSize, fmt.Sprintf("audit-%s-batch-max-size", pluginName), + o.BatchConfig.MaxBatchSize, "The maximum size of a batch. Only used in batch mode.") + fs.DurationVar(&o.BatchConfig.MaxBatchWait, fmt.Sprintf("audit-%s-batch-max-wait", pluginName), + o.BatchConfig.MaxBatchWait, "The amount of time to wait before force writing the "+ + "batch that hadn't reached the max size. Only used in batch mode.") + fs.BoolVar(&o.BatchConfig.ThrottleEnable, fmt.Sprintf("audit-%s-batch-throttle-enable", pluginName), + o.BatchConfig.ThrottleEnable, "Whether batching throttling is enabled. Only used in batch mode.") + fs.Float32Var(&o.BatchConfig.ThrottleQPS, fmt.Sprintf("audit-%s-batch-throttle-qps", pluginName), + o.BatchConfig.ThrottleQPS, "Maximum average number of batches per second. "+ + "Only used in batch mode.") + fs.IntVar(&o.BatchConfig.ThrottleBurst, fmt.Sprintf("audit-%s-batch-throttle-burst", pluginName), + o.BatchConfig.ThrottleBurst, "Maximum number of requests sent at the same "+ + "moment if ThrottleQPS was not utilized before. Only used in batch mode.") +} + +type ignoreErrorsBackend struct { + audit.Backend +} + +func (i *ignoreErrorsBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + i.Backend.ProcessEvents(ev...) 
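+	// Discard the delegate's result so a failing audit backend never blocks request processing.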
+ return true +} + +func (i *ignoreErrorsBackend) String() string { + return fmt.Sprintf("ignoreErrors<%s>", i.Backend) +} + +func (o *AuditBatchOptions) wrapBackend(delegate audit.Backend) audit.Backend { + if o.Mode == ModeBlockingStrict { + return delegate + } + if o.Mode == ModeBlocking { + return &ignoreErrorsBackend{Backend: delegate} + } + return pluginbuffered.NewBackend(delegate, o.BatchConfig) +} + +func (o *AuditTruncateOptions) Validate(pluginName string) error { + config := o.TruncateConfig + if config.MaxEventSize <= 0 { + return fmt.Errorf("invalid audit truncate %s max event size %v, must be a positive number", pluginName, config.MaxEventSize) + } + if config.MaxBatchSize < config.MaxEventSize { + return fmt.Errorf("invalid audit truncate %s max batch size %v, must be greater than "+ + "max event size (%v)", pluginName, config.MaxBatchSize, config.MaxEventSize) + } + return nil +} + +func (o *AuditTruncateOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.BoolVar(&o.Enabled, fmt.Sprintf("audit-%s-truncate-enabled", pluginName), + o.Enabled, "Whether event and batch truncating is enabled.") + fs.Int64Var(&o.TruncateConfig.MaxBatchSize, fmt.Sprintf("audit-%s-truncate-max-batch-size", pluginName), + o.TruncateConfig.MaxBatchSize, "Maximum size of the batch sent to the underlying backend. "+ + "Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, "+ + "it is split into several batches of smaller size.") + fs.Int64Var(&o.TruncateConfig.MaxEventSize, fmt.Sprintf("audit-%s-truncate-max-event-size", pluginName), + o.TruncateConfig.MaxEventSize, "Maximum size of the audit event sent to the underlying backend. "+ + "If the size of an event is greater than this number, first request and response are removed, and "+ + "if this doesn't reduce the size enough, event is discarded.") +} + +func (o *AuditTruncateOptions) wrapBackend(delegate audit.Backend, gv schema.GroupVersion) audit.Backend { + if !o.Enabled { + return delegate + } + return plugintruncate.NewBackend(delegate, o.TruncateConfig, gv) +} + +func (o *AuditLogOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.Path, "audit-log-path", o.Path, + "If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.") + fs.IntVar(&o.MaxAge, "audit-log-maxage", o.MaxAge, + "The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.") + fs.IntVar(&o.MaxBackups, "audit-log-maxbackup", o.MaxBackups, + "The maximum number of old audit log files to retain.") + fs.IntVar(&o.MaxSize, "audit-log-maxsize", o.MaxSize, + "The maximum size in megabytes of the audit log file before it gets rotated.") + fs.StringVar(&o.Format, "audit-log-format", o.Format, + "Format of saved audits. \"legacy\" indicates 1-line text format for each event."+ + " \"json\" indicates structured json format. Known formats are "+ + strings.Join(pluginlog.AllowedFormats, ",")+".") + fs.StringVar(&o.GroupVersionString, "audit-log-version", o.GroupVersionString, + "API group and version used for serializing audit events written to log.") + fs.BoolVar(&o.Compress, "audit-log-compress", o.Compress, "If set, the rotated log files will be compressed using gzip.") +} + +func (o *AuditLogOptions) Validate() []error { + // Check whether the log backend is enabled based on the options. 
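+	// Nothing to validate when --audit-log-path is unset.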
+ if !o.enabled() { + return nil + } + + var allErrors []error + + if err := validateBackendBatchOptions(pluginlog.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginlog.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := validateGroupVersionString(o.GroupVersionString); err != nil { + allErrors = append(allErrors, err) + } + + // Check log format + validFormat := false + for _, f := range pluginlog.AllowedFormats { + if f == o.Format { + validFormat = true + break + } + } + if !validFormat { + allErrors = append(allErrors, fmt.Errorf("invalid audit log format %s, allowed formats are %q", o.Format, strings.Join(pluginlog.AllowedFormats, ","))) + } + + // Check validities of MaxAge, MaxBackups and MaxSize of log options, if file log backend is enabled. + if o.MaxAge < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxage %v can't be a negative number", o.MaxAge)) + } + if o.MaxBackups < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxbackup %v can't be a negative number", o.MaxBackups)) + } + if o.MaxSize < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxsize %v can't be a negative number", o.MaxSize)) + } + + return allErrors +} + +// Check whether the log backend is enabled based on the options. +func (o *AuditLogOptions) enabled() bool { + return o != nil && o.Path != "" +} + +func (o *AuditLogOptions) getWriter() io.Writer { + if !o.enabled() { + return nil + } + + var w io.Writer = os.Stdout + if o.Path != "-" { + w = &lumberjack.Logger{ + Filename: o.Path, + MaxAge: o.MaxAge, + MaxBackups: o.MaxBackups, + MaxSize: o.MaxSize, + Compress: o.Compress, + } + } + return w +} + +func (o *AuditLogOptions) newBackend(w io.Writer) audit.Backend { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + log := pluginlog.NewBackend(w, o.Format, groupVersion) + log = o.BatchOptions.wrapBackend(log) + log = o.TruncateOptions.wrapBackend(log, groupVersion) + return log +} + +func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.ConfigFile, "audit-webhook-config-file", o.ConfigFile, + "Path to a kubeconfig formatted file that defines the audit webhook configuration.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-batch-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.MarkDeprecated("audit-webhook-batch-initial-backoff", + "Deprecated, use --audit-webhook-initial-backoff instead.") + fs.StringVar(&o.GroupVersionString, "audit-webhook-version", o.GroupVersionString, + "API group and version used for serializing audit events written to webhook.") +} + +func (o *AuditWebhookOptions) Validate() []error { + if !o.enabled() { + return nil + } + + var allErrors []error + if err := validateBackendBatchOptions(pluginwebhook.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginwebhook.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := validateGroupVersionString(o.GroupVersionString); err != nil { + allErrors = append(allErrors, err) + } + return allErrors +} + +func (o *AuditWebhookOptions) enabled() bool { + return o != nil && o.ConfigFile != "" +} + +// newUntruncatedBackend returns a webhook 
backend without the truncate options applied +// this is done so that the same trucate backend can wrap both the webhook and dynamic backends +func (o *AuditWebhookOptions) newUntruncatedBackend(customDial utilnet.DialFunc) (audit.Backend, error) { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + webhook, err := pluginwebhook.NewBackend(o.ConfigFile, groupVersion, webhook.DefaultRetryBackoffWithInitialDelay(o.InitialBackoff), customDial) + if err != nil { + return nil, fmt.Errorf("initializing audit webhook: %v", err) + } + webhook = o.BatchOptions.wrapBackend(webhook) + return webhook, nil +} + +// defaultWebhookBatchConfig returns the default BatchConfig used by the Webhook backend. +func defaultWebhookBatchConfig() pluginbuffered.BatchConfig { + return pluginbuffered.BatchConfig{ + BufferSize: defaultBatchBufferSize, + MaxBatchSize: defaultBatchMaxSize, + MaxBatchWait: defaultBatchMaxWait, + + ThrottleEnable: true, + ThrottleQPS: defaultBatchThrottleQPS, + ThrottleBurst: defaultBatchThrottleBurst, + + AsyncDelegate: true, + } +} + +// defaultLogBatchConfig returns the default BatchConfig used by the Log backend. +func defaultLogBatchConfig() pluginbuffered.BatchConfig { + return pluginbuffered.BatchConfig{ + BufferSize: defaultBatchBufferSize, + // Batching is not useful for the log-file backend. + // MaxBatchWait ignored. + MaxBatchSize: 1, + ThrottleEnable: false, + // Asynchronous log threads just create lock contention. + AsyncDelegate: false, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go new file mode 100644 index 000000000..e266fb73e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go @@ -0,0 +1,421 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "strings" + "time" + + "k8s.io/apiserver/pkg/server/dynamiccertificates" + + "github.com/spf13/pflag" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" + openapicommon "k8s.io/kube-openapi/pkg/common" +) + +// DefaultAuthWebhookRetryBackoff is the default backoff parameters for +// both authentication and authorization webhook used by the apiserver. +func DefaultAuthWebhookRetryBackoff() *wait.Backoff { + return &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + } +} + +type RequestHeaderAuthenticationOptions struct { + // ClientCAFile is the root certificate bundle to verify client certificates on incoming requests + // before trusting usernames in headers. 
+ ClientCAFile string + + UsernameHeaders []string + GroupHeaders []string + ExtraHeaderPrefixes []string + AllowedNames []string +} + +func (s *RequestHeaderAuthenticationOptions) Validate() []error { + allErrors := []error{} + + if err := checkForWhiteSpaceOnly("requestheader-username-headers", s.UsernameHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-group-headers", s.GroupHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-allowed-names", s.AllowedNames...); err != nil { + allErrors = append(allErrors, err) + } + + return allErrors +} + +func checkForWhiteSpaceOnly(flag string, headerNames ...string) error { + for _, headerName := range headerNames { + if len(strings.TrimSpace(headerName)) == 0 { + return fmt.Errorf("empty value in %q", flag) + } + } + + return nil +} + +func (s *RequestHeaderAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.UsernameHeaders, "requestheader-username-headers", s.UsernameHeaders, ""+ + "List of request headers to inspect for usernames. X-Remote-User is common.") + + fs.StringSliceVar(&s.GroupHeaders, "requestheader-group-headers", s.GroupHeaders, ""+ + "List of request headers to inspect for groups. X-Remote-Group is suggested.") + + fs.StringSliceVar(&s.ExtraHeaderPrefixes, "requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes, ""+ + "List of request header prefixes to inspect. X-Remote-Extra- is suggested.") + + fs.StringVar(&s.ClientCAFile, "requestheader-client-ca-file", s.ClientCAFile, ""+ + "Root certificate bundle to use to verify client certificates on incoming requests "+ + "before trusting usernames in headers specified by --requestheader-username-headers. "+ + "WARNING: generally do not depend on authorization being already done for incoming requests.") + + fs.StringSliceVar(&s.AllowedNames, "requestheader-allowed-names", s.AllowedNames, ""+ + "List of client certificate common names to allow to provide usernames in headers "+ + "specified by --requestheader-username-headers. If empty, any client certificate validated "+ + "by the authorities in --requestheader-client-ca-file is allowed.") +} + +// ToAuthenticationRequestHeaderConfig returns a RequestHeaderConfig config object for these options +// if necessary, nil otherwise. +func (s *RequestHeaderAuthenticationOptions) ToAuthenticationRequestHeaderConfig() (*authenticatorfactory.RequestHeaderConfig, error) { + if len(s.ClientCAFile) == 0 { + return nil, nil + } + + caBundleProvider, err := dynamiccertificates.NewDynamicCAContentFromFile("request-header", s.ClientCAFile) + if err != nil { + return nil, err + } + + return &authenticatorfactory.RequestHeaderConfig{ + UsernameHeaders: headerrequest.StaticStringSlice(s.UsernameHeaders), + GroupHeaders: headerrequest.StaticStringSlice(s.GroupHeaders), + ExtraHeaderPrefixes: headerrequest.StaticStringSlice(s.ExtraHeaderPrefixes), + CAContentProvider: caBundleProvider, + AllowedClientNames: headerrequest.StaticStringSlice(s.AllowedNames), + }, nil +} + +// ClientCertAuthenticationOptions provides different options for client cert auth. You should use `GetClientVerifyOptionFn` to +// get the verify options for your authenticator. 
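+// In this package that accessor is GetClientCAContentProvider, defined below.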
+type ClientCertAuthenticationOptions struct { + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA string + + // CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users. + // Generally this is the CA bundle file used to authenticate client certificates + // If non-nil, this takes priority over the ClientCA file. + CAContentProvider dynamiccertificates.CAContentProvider +} + +// GetClientVerifyOptionFn provides verify options for your authenticator while respecting the preferred order of verifiers. +func (s *ClientCertAuthenticationOptions) GetClientCAContentProvider() (dynamiccertificates.CAContentProvider, error) { + if s.CAContentProvider != nil { + return s.CAContentProvider, nil + } + + if len(s.ClientCA) == 0 { + return nil, nil + } + + return dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", s.ClientCA) +} + +func (s *ClientCertAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.ClientCA, "client-ca-file", s.ClientCA, ""+ + "If set, any request presenting a client certificate signed by one of "+ + "the authorities in the client-ca-file is authenticated with an identity "+ + "corresponding to the CommonName of the client certificate.") +} + +// DelegatingAuthenticationOptions provides an easy way for composing API servers to delegate their authentication to +// the root kube API server. The API federator will act as +// a front proxy and direction connections will be able to delegate to the core kube API server +type DelegatingAuthenticationOptions struct { + // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the + // TokenAccessReview.authentication.k8s.io endpoint for checking tokens. + RemoteKubeConfigFile string + // RemoteKubeConfigFileOptional is specifying whether not specifying the kubeconfig or + // a missing in-cluster config will be fatal. + RemoteKubeConfigFileOptional bool + + // CacheTTL is the length of time that a token authentication answer will be cached. + CacheTTL time.Duration + + ClientCert ClientCertAuthenticationOptions + RequestHeader RequestHeaderAuthenticationOptions + + // SkipInClusterLookup indicates missing authentication configuration should not be retrieved from the cluster configmap + SkipInClusterLookup bool + + // TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal. + // Setting this can result in an authenticator that will reject all requests. + TolerateInClusterLookupFailure bool + + // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff + + // ClientTimeout specifies a time limit for requests made by the authorization webhook client. + // The default value is set to 10 seconds. 
+ ClientTimeout time.Duration +} + +func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { + return &DelegatingAuthenticationOptions{ + // very low for responsiveness, but high enough to handle storms + CacheTTL: 10 * time.Second, + ClientCert: ClientCertAuthenticationOptions{}, + RequestHeader: RequestHeaderAuthenticationOptions{ + UsernameHeaders: []string{"x-remote-user"}, + GroupHeaders: []string{"x-remote-group"}, + ExtraHeaderPrefixes: []string{"x-remote-extra-"}, + }, + WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), + ClientTimeout: 10 * time.Second, + } +} + +// WithCustomRetryBackoff sets the custom backoff parameters for the authentication webhook retry logic. +func (s *DelegatingAuthenticationOptions) WithCustomRetryBackoff(backoff wait.Backoff) { + s.WebhookRetryBackoff = &backoff +} + +// WithClientTimeout sets the given timeout for the authentication webhook client. +func (s *DelegatingAuthenticationOptions) WithClientTimeout(timeout time.Duration) { + s.ClientTimeout = timeout +} + +func (s *DelegatingAuthenticationOptions) Validate() []error { + allErrors := []error{} + allErrors = append(allErrors, s.RequestHeader.Validate()...) + + if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 { + allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps)) + } + + return allErrors +} + +func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + var optionalKubeConfigSentence string + if s.RemoteKubeConfigFileOptional { + optionalKubeConfigSentence = " This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster." + } + fs.StringVar(&s.RemoteKubeConfigFile, "authentication-kubeconfig", s.RemoteKubeConfigFile, ""+ + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ + "tokenreviews.authentication.k8s.io."+optionalKubeConfigSentence) + + fs.DurationVar(&s.CacheTTL, "authentication-token-webhook-cache-ttl", s.CacheTTL, + "The duration to cache responses from the webhook token authenticator.") + + s.ClientCert.AddFlags(fs) + s.RequestHeader.AddFlags(fs) + + fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+ + "If false, the authentication-kubeconfig will be used to lookup missing authentication "+ + "configuration from the cluster.") + fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+ + "If true, failures to look up missing authentication configuration from the cluster are not considered fatal. 
"+ + "Note that this can result in authentication that treats all requests as anonymous.") +} + +func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { + if s == nil { + authenticationInfo.Authenticator = nil + return nil + } + + cfg := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: true, + CacheTTL: s.CacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + } + + client, err := s.getClient() + if err != nil { + return fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // configure token review + if client != nil { + cfg.TokenAccessReviewClient = client.AuthenticationV1().TokenReviews() + } + + // get the clientCA information + clientCAFileSpecified := len(s.ClientCert.ClientCA) > 0 + var clientCAProvider dynamiccertificates.CAContentProvider + if clientCAFileSpecified { + clientCAProvider, err = s.ClientCert.GetClientCAContentProvider() + if err != nil { + return fmt.Errorf("unable to load client CA file %q: %v", s.ClientCert.ClientCA, err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign client CA file: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + clientCAProvider, err = dynamiccertificates.NewDynamicCAFromConfigMapController("client-ca", authenticationConfigMapNamespace, authenticationConfigMapName, "client-ca-file", client) + if err != nil { + return fmt.Errorf("unable to load configmap based client CA file: %v", err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign configmap based client CA file: %v", err) + } + + } + } + + requestHeaderCAFileSpecified := len(s.RequestHeader.ClientCAFile) > 0 + var requestHeaderConfig *authenticatorfactory.RequestHeaderConfig + if requestHeaderCAFileSpecified { + requestHeaderConfig, err = s.RequestHeader.ToAuthenticationRequestHeaderConfig() + if err != nil { + return fmt.Errorf("unable to create request header authentication config: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + requestHeaderConfig, err = s.createRequestHeaderConfig(client) + if err != nil { + if s.TolerateInClusterLookupFailure { + klog.Warningf("Error looking up in-cluster authentication configuration: %v", err) + klog.Warningf("Continuing without authentication configuration. 
This may treat all requests as anonymous.") + klog.Warningf("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false") + } else { + return fmt.Errorf("unable to load configmap based request-header-client-ca-file: %v", err) + } + } + } + } + if requestHeaderConfig != nil { + cfg.RequestHeaderConfig = requestHeaderConfig + if err = authenticationInfo.ApplyClientCert(cfg.RequestHeaderConfig.CAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to load request-header-client-ca-file: %v", err) + } + } + + // create authenticator + authenticator, securityDefinitions, err := cfg.New() + if err != nil { + return err + } + authenticationInfo.Authenticator = authenticator + if openAPIConfig != nil { + openAPIConfig.SecurityDefinitions = securityDefinitions + } + + return nil +} + +const ( + authenticationConfigMapNamespace = metav1.NamespaceSystem + // authenticationConfigMapName is the name of ConfigMap in the kube-system namespace holding the root certificate + // bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified + // by --requestheader-username-headers. This is created in the cluster by the kube-apiserver. + // "WARNING: generally do not depend on authorization being already done for incoming requests.") + authenticationConfigMapName = "extension-apiserver-authentication" +) + +func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kubernetes.Interface) (*authenticatorfactory.RequestHeaderConfig, error) { + dynamicRequestHeaderProvider, err := newDynamicRequestHeaderController(client) + if err != nil { + return nil, fmt.Errorf("unable to create request header authentication config: %v", err) + } + + // look up authentication configuration in the cluster and in case of an err defer to authentication-tolerate-lookup-failure flag + if err := dynamicRequestHeaderProvider.RunOnce(); err != nil { + return nil, err + } + + return &authenticatorfactory.RequestHeaderConfig{ + CAContentProvider: dynamicRequestHeaderProvider, + UsernameHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UsernameHeaders)), + GroupHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.GroupHeaders)), + ExtraHeaderPrefixes: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.ExtraHeaderPrefixes)), + AllowedClientNames: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.AllowedClientNames)), + }, nil +} + +// getClient returns a Kubernetes clientset. If s.RemoteKubeConfigFileOptional is true, nil will be returned +// if no kubeconfig is specified by the user and the in-cluster config is not found. +func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. 
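+		// rest.ErrNotInCluster is the expected error outside a cluster; other errors are logged,
+		// and both are tolerated when the kubeconfig was marked optional.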
+ clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + clientConfig.Timeout = s.ClientTimeout + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go new file mode 100644 index 000000000..5c558b06d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go @@ -0,0 +1,79 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/kubernetes" +) + +var _ dynamiccertificates.ControllerRunner = &DynamicRequestHeaderController{} +var _ dynamiccertificates.Notifier = &DynamicRequestHeaderController{} +var _ dynamiccertificates.CAContentProvider = &DynamicRequestHeaderController{} + +var _ headerrequest.RequestHeaderAuthRequestProvider = &DynamicRequestHeaderController{} + +// DynamicRequestHeaderController combines DynamicCAFromConfigMapController and RequestHeaderAuthRequestController +// into one controller for dynamically filling RequestHeaderConfig struct +type DynamicRequestHeaderController struct { + *dynamiccertificates.ConfigMapCAController + *headerrequest.RequestHeaderAuthRequestController +} + +// newDynamicRequestHeaderController creates a new controller that implements DynamicRequestHeaderController +func newDynamicRequestHeaderController(client kubernetes.Interface) (*DynamicRequestHeaderController, error) { + requestHeaderCAController, err := dynamiccertificates.NewDynamicCAFromConfigMapController( + "client-ca", + authenticationConfigMapNamespace, + authenticationConfigMapName, + "requestheader-client-ca-file", + client) + if err != nil { + return nil, fmt.Errorf("unable to create DynamicCAFromConfigMap controller: %v", err) + } + + requestHeaderAuthRequestController := headerrequest.NewRequestHeaderAuthRequestController( + authenticationConfigMapName, + authenticationConfigMapNamespace, + client, + "requestheader-username-headers", + "requestheader-group-headers", + "requestheader-extra-headers-prefix", + "requestheader-allowed-names", + ) + return &DynamicRequestHeaderController{ + ConfigMapCAController: requestHeaderCAController, + RequestHeaderAuthRequestController: requestHeaderAuthRequestController, + }, nil +} + +func (c *DynamicRequestHeaderController) RunOnce() error { + errs 
:= []error{} + errs = append(errs, c.ConfigMapCAController.RunOnce()) + errs = append(errs, c.RequestHeaderAuthRequestController.RunOnce()) + return errors.NewAggregate(errs) +} + +func (c *DynamicRequestHeaderController) Run(workers int, stopCh <-chan struct{}) { + go c.ConfigMapCAController.Run(workers, stopCh) + go c.RequestHeaderAuthRequestController.Run(workers, stopCh) + <-stopCh +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authorization.go b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go new file mode 100644 index 000000000..04523e8f2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + "k8s.io/apiserver/pkg/authorization/path" + "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to +// the root kube API server. +// WARNING: never assume that every authenticated incoming request already does authorization. +// The aggregator in the kube API server does this today, but this behaviour is not +// guaranteed in the future. +type DelegatingAuthorizationOptions struct { + // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the + // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens. + RemoteKubeConfigFile string + // RemoteKubeConfigFileOptional is specifying whether not specifying the kubeconfig or + // a missing in-cluster config will be fatal. + RemoteKubeConfigFileOptional bool + + // AllowCacheTTL is the length of time that a successful authorization response will be cached + AllowCacheTTL time.Duration + + // DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached. + // You generally want more responsive, "deny, try again" flows. + DenyCacheTTL time.Duration + + // AlwaysAllowPaths are HTTP paths which are excluded from authorization. They can be plain + // paths or end in * in which case prefix-match is applied. A leading / is optional. + AlwaysAllowPaths []string + + // AlwaysAllowGroups are groups which are allowed to take any actions. In kube, this is system:masters. + AlwaysAllowGroups []string + + // ClientTimeout specifies a time limit for requests made by SubjectAccessReviews client. + // The default value is set to 10 seconds. + ClientTimeout time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. 
+ // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff +} + +func NewDelegatingAuthorizationOptions() *DelegatingAuthorizationOptions { + return &DelegatingAuthorizationOptions{ + // very low for responsiveness, but high enough to handle storms + AllowCacheTTL: 10 * time.Second, + DenyCacheTTL: 10 * time.Second, + ClientTimeout: 10 * time.Second, + WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), + } +} + +// WithAlwaysAllowGroups appends the list of paths to AlwaysAllowGroups +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowGroups(groups ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowGroups = append(s.AlwaysAllowGroups, groups...) + return s +} + +// WithAlwaysAllowPaths appends the list of paths to AlwaysAllowPaths +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowPaths(paths ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowPaths = append(s.AlwaysAllowPaths, paths...) + return s +} + +// WithClientTimeout sets the given timeout for SAR client used by this authorizer +func (s *DelegatingAuthorizationOptions) WithClientTimeout(timeout time.Duration) { + s.ClientTimeout = timeout +} + +// WithCustomRetryBackoff sets the custom backoff parameters for the authorization webhook retry logic. +func (s *DelegatingAuthorizationOptions) WithCustomRetryBackoff(backoff wait.Backoff) { + s.WebhookRetryBackoff = &backoff +} + +func (s *DelegatingAuthorizationOptions) Validate() []error { + allErrors := []error{} + + if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 { + allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps)) + } + + return allErrors +} + +func (s *DelegatingAuthorizationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + var optionalKubeConfigSentence string + if s.RemoteKubeConfigFileOptional { + optionalKubeConfigSentence = " This is optional. If empty, all requests not skipped by authorization are forbidden." + } + fs.StringVar(&s.RemoteKubeConfigFile, "authorization-kubeconfig", s.RemoteKubeConfigFile, + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ + "subjectaccessreviews.authorization.k8s.io."+optionalKubeConfigSentence) + + fs.DurationVar(&s.AllowCacheTTL, "authorization-webhook-cache-authorized-ttl", + s.AllowCacheTTL, + "The duration to cache 'authorized' responses from the webhook authorizer.") + + fs.DurationVar(&s.DenyCacheTTL, + "authorization-webhook-cache-unauthorized-ttl", s.DenyCacheTTL, + "The duration to cache 'unauthorized' responses from the webhook authorizer.") + + fs.StringSliceVar(&s.AlwaysAllowPaths, "authorization-always-allow-paths", s.AlwaysAllowPaths, + "A list of HTTP paths to skip during authorization, i.e. 
these are authorized without "+ + "contacting the 'core' kubernetes server.") +} + +func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.AuthorizationInfo) error { + if s == nil { + c.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() + return nil + } + + client, err := s.getClient() + if err != nil { + return err + } + + c.Authorizer, err = s.toAuthorizer(client) + return err +} + +func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interface) (authorizer.Authorizer, error) { + var authorizers []authorizer.Authorizer + + if len(s.AlwaysAllowGroups) > 0 { + authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(s.AlwaysAllowGroups...)) + } + + if len(s.AlwaysAllowPaths) > 0 { + a, err := path.NewAuthorizer(s.AlwaysAllowPaths) + if err != nil { + return nil, err + } + authorizers = append(authorizers, a) + } + + if client == nil { + klog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") + } else { + cfg := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: client.AuthorizationV1().SubjectAccessReviews(), + AllowCacheTTL: s.AllowCacheTTL, + DenyCacheTTL: s.DenyCacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + } + delegatedAuthorizer, err := cfg.New() + if err != nil { + return nil, err + } + authorizers = append(authorizers, delegatedAuthorizer) + } + + return union.New(authorizers...), nil +} + +func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. + clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authorization kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + clientConfig.Timeout = s.ClientTimeout + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go new file mode 100644 index 000000000..d46dece4a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" + clientgoinformers "k8s.io/client-go/informers" + clientgoclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// CoreAPIOptions contains options to configure the connection to a core API Kubernetes apiserver. +type CoreAPIOptions struct { + // CoreAPIKubeconfigPath is a filename for a kubeconfig file to contact the core API server with. + // If it is not set, the in cluster config is used. + CoreAPIKubeconfigPath string +} + +func NewCoreAPIOptions() *CoreAPIOptions { + return &CoreAPIOptions{} +} + +func (o *CoreAPIOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.CoreAPIKubeconfigPath, "kubeconfig", o.CoreAPIKubeconfigPath, + "kubeconfig file pointing at the 'core' kubernetes server.") +} + +func (o *CoreAPIOptions) ApplyTo(config *server.RecommendedConfig) error { + if o == nil { + return nil + } + + // create shared informer for Kubernetes APIs + var kubeconfig *rest.Config + var err error + if len(o.CoreAPIKubeconfigPath) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: o.CoreAPIKubeconfigPath} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + kubeconfig, err = loader.ClientConfig() + if err != nil { + return fmt.Errorf("failed to load kubeconfig at %q: %v", o.CoreAPIKubeconfigPath, err) + } + } else { + kubeconfig, err = rest.InClusterConfig() + if err != nil { + return err + } + } + clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeconfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes clientset: %v", err) + } + config.ClientConfig = kubeconfig + config.SharedInformerFactory = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute) + + return nil +} + +func (o *CoreAPIOptions) Validate() []error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go new file mode 100644 index 000000000..1c066313c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go @@ -0,0 +1,169 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +// DeprecatedInsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port. +// No one should be using these anymore. +// DEPRECATED: all insecure serving options are removed in a future version +type DeprecatedInsecureServingOptions struct { + BindAddress net.IP + BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + + // Listener is the secure server network listener. 
+ // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener + + // ListenFunc can be overridden to create a custom listener, e.g. for mocking in tests. + // It defaults to options.CreateListener. + ListenFunc func(network, addr string, config net.ListenConfig) (net.Listener, int, error) +} + +// Validate ensures that the insecure port values within the range of the port. +func (s *DeprecatedInsecureServingOptions) Validate() []error { + if s == nil { + return nil + } + + errors := []error{} + + if s.BindPort < 0 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("insecure port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort)) + } + + return errors +} + +// AddFlags adds flags related to insecure serving to the specified FlagSet. +func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+ + "The IP address on which to serve the --insecure-port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") + // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 + fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.") + fs.Lookup("insecure-bind-address").Hidden = false + + fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+ + "The port on which to serve unsecured, unauthenticated access.") + // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 + fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.") + fs.Lookup("insecure-port").Hidden = false +} + +// AddUnqualifiedFlags adds flags related to insecure serving without the --insecure prefix to the specified FlagSet. +func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "address", s.BindAddress, + "The IP address on which to serve the insecure --port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") + fs.MarkDeprecated("address", "see --bind-address instead.") + fs.Lookup("address").Hidden = false + + fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.") + fs.MarkDeprecated("port", "see --secure-port instead.") + fs.Lookup("port").Hidden = false +} + +// ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo and kube-controller manager configuration. +// Note: the double pointer allows to set the *DeprecatedInsecureServingInfo to nil without referencing the struct hosting this pointer. 
+func (s *DeprecatedInsecureServingOptions) ApplyTo(c **server.DeprecatedInsecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 { + return nil + } + + if s.Listener == nil { + var err error + listen := CreateListener + if s.ListenFunc != nil { + listen = s.ListenFunc + } + addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort)) + s.Listener, s.BindPort, err = listen(s.BindNetwork, addr, net.ListenConfig{}) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } + + *c = &server.DeprecatedInsecureServingInfo{ + Listener: s.Listener, + } + + return nil +} + +// WithLoopback adds loopback functionality to the serving options. +func (o *DeprecatedInsecureServingOptions) WithLoopback() *DeprecatedInsecureServingOptionsWithLoopback { + return &DeprecatedInsecureServingOptionsWithLoopback{o} +} + +// DeprecatedInsecureServingOptionsWithLoopback adds loopback functionality to the DeprecatedInsecureServingOptions. +// DEPRECATED: all insecure serving options will be removed in a future version, however note that +// there are security concerns over how health checks can work here - see e.g. #43784 +type DeprecatedInsecureServingOptionsWithLoopback struct { + *DeprecatedInsecureServingOptions +} + +// ApplyTo fills up serving information in the server configuration. +func (s *DeprecatedInsecureServingOptionsWithLoopback) ApplyTo(insecureServingInfo **server.DeprecatedInsecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.DeprecatedInsecureServingOptions == nil || insecureServingInfo == nil { + return nil + } + + if err := s.DeprecatedInsecureServingOptions.ApplyTo(insecureServingInfo); err != nil { + return err + } + + if *insecureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + secureLoopbackClientConfig, err := (*insecureServingInfo).NewLoopbackClientConfig() + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/doc.go b/vendor/k8s.io/apiserver/pkg/server/options/doc.go new file mode 100644 index 000000000..426336be0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package options is the public flags and options used by a generic api +// server. It takes a minimal set of dependencies and does not reference +// implementations, in order to ensure it may be reused by multiple components +// (such as CLI commands that wish to generate or validate config). 
+package options // import "k8s.io/apiserver/pkg/server/options" diff --git a/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go new file mode 100644 index 000000000..07999cab1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/utils/path" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/egressselector" +) + +// EgressSelectorOptions holds the api server egress selector options. +// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190226-network-proxy.md +type EgressSelectorOptions struct { + // ConfigFile is the file path with api-server egress selector configuration. + ConfigFile string +} + +// NewEgressSelectorOptions creates a new instance of EgressSelectorOptions +// +// The option is to point to a configuration file for egress/konnectivity. +// This determines which types of requests use egress/konnectivity and how they use it. +// If empty the API Server will attempt to connect directly using the network. +func NewEgressSelectorOptions() *EgressSelectorOptions { + return &EgressSelectorOptions{} +} + +// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet +func (o *EgressSelectorOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.ConfigFile, "egress-selector-config-file", o.ConfigFile, + "File with apiserver egress selector configuration.") +} + +// ApplyTo adds the egress selector settings to the server configuration. +// In case egress selector settings were not provided by a cluster-admin +// they will be prepared from the recommended/default/no-op values. +func (o *EgressSelectorOptions) ApplyTo(c *server.Config) error { + if o == nil { + return nil + } + + npConfig, err := egressselector.ReadEgressSelectorConfiguration(o.ConfigFile) + if err != nil { + return fmt.Errorf("failed to read egress selector config: %v", err) + } + errs := egressselector.ValidateEgressSelectorConfiguration(npConfig) + if len(errs) > 0 { + return fmt.Errorf("failed to validate egress selector configuration: %v", errs.ToAggregate()) + } + + cs, err := egressselector.NewEgressSelector(npConfig) + if err != nil { + return fmt.Errorf("failed to setup egress selector with config %#v: %v", npConfig, err) + } + c.EgressSelector = cs + return nil +} + +// Validate verifies flags passed to EgressSelectorOptions. 
+func (o *EgressSelectorOptions) Validate() []error { + if o == nil || o.ConfigFile == "" { + return nil + } + + errs := []error{} + + if exists, err := path.Exists(path.CheckFollowSymlink, o.ConfigFile); !exists || err != nil { + errs = append(errs, fmt.Errorf("egress-selector-config-file %s does not exist", o.ConfigFile)) + } + + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS new file mode 100644 index 000000000..71edc3ecd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-encryption-at-rest-approvers +reviewers: +- sig-auth-encryption-at-rest-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go new file mode 100644 index 000000000..372ab5eb8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -0,0 +1,375 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encryptionconfig + +import ( + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiserverconfig "k8s.io/apiserver/pkg/apis/config" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/apiserver/pkg/apis/config/validation" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/storage/value" + aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" + "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" + "k8s.io/apiserver/pkg/storage/value/encrypt/identity" + "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox" +) + +const ( + aesCBCTransformerPrefixV1 = "k8s:enc:aescbc:v1:" + aesGCMTransformerPrefixV1 = "k8s:enc:aesgcm:v1:" + secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" + kmsTransformerPrefixV1 = "k8s:enc:kms:v1:" + kmsPluginHealthzNegativeTTL = 3 * time.Second + kmsPluginHealthzPositiveTTL = 20 * time.Second +) + +type kmsPluginHealthzResponse struct { + err error + received time.Time +} + +type kmsPluginProbe struct { + name string + ttl time.Duration + envelope.Service + lastResponse *kmsPluginHealthzResponse + l *sync.Mutex +} + +func (h *kmsPluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { + return healthz.NamedCheck(fmt.Sprintf("kms-provider-%d", idx), func(r *http.Request) error { + return h.Check() + }) +} + +// GetKMSPluginHealthzCheckers extracts KMSPluginProbes from the EncryptionConfig. 
+func GetKMSPluginHealthzCheckers(filepath string) ([]healthz.HealthChecker, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + var result []healthz.HealthChecker + probes, err := getKMSPluginProbes(f) + if err != nil { + return nil, err + } + + for i, p := range probes { + probe := p + result = append(result, probe.toHealthzCheck(i)) + } + return result, nil +} + +func getKMSPluginProbes(reader io.Reader) ([]*kmsPluginProbe, error) { + var result []*kmsPluginProbe + + configFileContents, err := ioutil.ReadAll(reader) + if err != nil { + return result, fmt.Errorf("could not read content of encryption provider configuration: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return result, fmt.Errorf("error while parsing encrypiton provider configuration: %v", err) + } + + for _, r := range config.Resources { + for _, p := range r.Providers { + if p.KMS != nil { + s, err := envelope.NewGRPCService(p.KMS.Endpoint, p.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS-Plugin's probe %q, error: %v", p.KMS.Name, err) + } + + result = append(result, &kmsPluginProbe{ + name: p.KMS.Name, + ttl: kmsPluginHealthzNegativeTTL, + Service: s, + l: &sync.Mutex{}, + lastResponse: &kmsPluginHealthzResponse{}, + }) + } + } + } + + return result, nil +} + +// Check encrypts and decrypts test data against KMS-Plugin's gRPC endpoint. +func (h *kmsPluginProbe) Check() error { + h.l.Lock() + defer h.l.Unlock() + + if (time.Since(h.lastResponse.received)) < h.ttl { + return h.lastResponse.err + } + + p, err := h.Service.Encrypt([]byte("ping")) + if err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform encrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + if _, err := h.Service.Decrypt(p); err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform decrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + h.lastResponse = &kmsPluginHealthzResponse{err: nil, received: time.Now()} + h.ttl = kmsPluginHealthzPositiveTTL + return nil +} + +// GetTransformerOverrides returns the transformer overrides by reading and parsing the encryption provider configuration file +func GetTransformerOverrides(filepath string) (map[schema.GroupResource]value.Transformer, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + + result, err := parseEncryptionConfiguration(f) + if err != nil { + return nil, fmt.Errorf("error while parsing encryption provider configuration file %q: %v", filepath, err) + } + return result, nil +} + +func parseEncryptionConfiguration(f io.Reader) (map[schema.GroupResource]value.Transformer, error) { + configFileContents, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("could not read contents: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return nil, fmt.Errorf("error while parsing file: %v", err) + } + + resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{} + + // For each entry in the configuration + for _, 
resourceConfig := range config.Resources { + transformers, err := prefixTransformers(&resourceConfig) + if err != nil { + return nil, err + } + + // For each resource, create a list of providers to use + for _, resource := range resourceConfig.Resources { + gr := schema.ParseGroupResource(resource) + resourceToPrefixTransformer[gr] = append( + resourceToPrefixTransformer[gr], transformers...) + } + } + + result := map[schema.GroupResource]value.Transformer{} + for gr, transList := range resourceToPrefixTransformer { + result[gr] = value.NewMutableTransformer(value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...)) + } + return result, nil + +} + +func loadConfig(data []byte) (*apiserverconfig.EncryptionConfiguration, error) { + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + apiserverconfig.AddToScheme(scheme) + apiserverconfigv1.AddToScheme(scheme) + + configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, err + } + config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, fmt.Errorf("got unexpected config type: %v", gvk) + } + + return config, validation.ValidateEncryptionConfiguration(config).ToAggregate() +} + +// The factory to create kms service. This is to make writing test easier. +var envelopeServiceFactory = envelope.NewGRPCService + +func prefixTransformers(config *apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, error) { + var result []value.PrefixTransformer + for _, provider := range config.Providers { + var ( + transformer value.PrefixTransformer + err error + ) + + switch { + case provider.AESGCM != nil: + transformer, err = aesPrefixTransformer(provider.AESGCM, aestransformer.NewGCMTransformer, aesGCMTransformerPrefixV1) + case provider.AESCBC != nil: + transformer, err = aesPrefixTransformer(provider.AESCBC, aestransformer.NewCBCTransformer, aesCBCTransformerPrefixV1) + case provider.Secretbox != nil: + transformer, err = secretboxPrefixTransformer(provider.Secretbox) + case provider.KMS != nil: + envelopeService, err := envelopeServiceFactory(provider.KMS.Endpoint, provider.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS plugin %q, error: %v", provider.KMS.Name, err) + } + + transformer, err = envelopePrefixTransformer(provider.KMS, envelopeService, kmsTransformerPrefixV1) + case provider.Identity != nil: + transformer = value.PrefixTransformer{ + Transformer: identity.NewEncryptCheckTransformer(), + Prefix: []byte{}, + } + default: + return nil, errors.New("provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity") + } + + if err != nil { + return result, err + } + result = append(result, transformer) + } + return result, nil +} + +type blockTransformerFunc func(cipher.Block) value.Transformer + +func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("aes provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := 
base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + block, err := aes.NewCipher(key) + if err != nil { + return result, fmt.Errorf("error while creating cipher for named key %s: %s", keyData.Name, err) + } + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: fn(block), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided AES transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(prefix), + } + return result, nil +} + +func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("secretbox provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + + if len(key) != 32 { + return result, fmt.Errorf("expected key size 32 for secretbox provider, got %v", len(key)) + } + + keyArray := [32]byte{} + copy(keyArray[:], key) + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: secretbox.NewSecretboxTransformer(keyArray), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided Secretbox transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(secretboxTransformerPrefixV1), + } + return result, nil +} + +func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) (value.PrefixTransformer, error) { + envelopeTransformer, err := envelope.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), aestransformer.NewCBCTransformer) + if err != nil { + return value.PrefixTransformer{}, err + } + return value.PrefixTransformer{ + Transformer: envelopeTransformer, + Prefix: []byte(prefix + config.Name + ":"), + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go new file mode 100644 index 000000000..d9c6f9e54 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -0,0 +1,329 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/options/encryptionconfig" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/klog/v2" +) + +type EtcdOptions struct { + // The value of Paging on StorageConfig will be overridden by the + // calculated feature gate value. + StorageConfig storagebackend.Config + EncryptionProviderConfigFilepath string + + EtcdServersOverrides []string + + // To enable protobuf as storage format, it is enough + // to set it to "application/vnd.kubernetes.protobuf". + DefaultStorageMediaType string + DeleteCollectionWorkers int + EnableGarbageCollection bool + + // Set EnableWatchCache to false to disable all watch caches + EnableWatchCache bool + // Set DefaultWatchCacheSize to zero to disable watch caches for those resources that have no explicit cache size set + DefaultWatchCacheSize int + // WatchCacheSizes represents override to a given resource + WatchCacheSizes []string +} + +var storageTypes = sets.NewString( + storagebackend.StorageTypeETCD3, +) + +func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { + options := &EtcdOptions{ + StorageConfig: *backendConfig, + DefaultStorageMediaType: "application/json", + DeleteCollectionWorkers: 1, + EnableGarbageCollection: true, + EnableWatchCache: true, + DefaultWatchCacheSize: 100, + } + options.StorageConfig.CountMetricPollPeriod = time.Minute + return options +} + +func (s *EtcdOptions) Validate() []error { + if s == nil { + return nil + } + + allErrors := []error{} + if len(s.StorageConfig.Transport.ServerList) == 0 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified")) + } + + if s.StorageConfig.Type != storagebackend.StorageTypeUnset && !storageTypes.Has(s.StorageConfig.Type) { + allErrors = append(allErrors, fmt.Errorf("--storage-backend invalid, allowed values: %s. 
If not specified, it will default to 'etcd3'", strings.Join(storageTypes.List(), ", "))) + } + + for _, override := range s.EtcdServersOverrides { + tokens := strings.Split(override, "#") + if len(tokens) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + apiresource := strings.Split(tokens[0], "/") + if len(apiresource) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + } + + return allErrors +} + +// AddEtcdFlags adds flags related to etcd storage for a specific APIServer to the specified FlagSet +func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.EtcdServersOverrides, "etcd-servers-overrides", s.EtcdServersOverrides, ""+ + "Per-resource etcd servers overrides, comma separated. The individual override "+ + "format: group/resource#servers, where servers are URLs, semicolon separated.") + + fs.StringVar(&s.DefaultStorageMediaType, "storage-media-type", s.DefaultStorageMediaType, ""+ + "The media type to use to store objects in storage. "+ + "Some resources or storage backends may only support a specific media type and will ignore this setting.") + fs.IntVar(&s.DeleteCollectionWorkers, "delete-collection-workers", s.DeleteCollectionWorkers, + "Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.") + + fs.BoolVar(&s.EnableGarbageCollection, "enable-garbage-collector", s.EnableGarbageCollection, ""+ + "Enables the generic garbage collector. MUST be synced with the corresponding flag "+ + "of the kube-controller-manager.") + + fs.BoolVar(&s.EnableWatchCache, "watch-cache", s.EnableWatchCache, + "Enable watch caching in the apiserver") + + fs.IntVar(&s.DefaultWatchCacheSize, "default-watch-cache-size", s.DefaultWatchCacheSize, + "Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.") + + fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ + "Watch cache size settings for some resources (pods, nodes, etc.), comma separated. "+ + "The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, "+ + "and size is a number. It takes effect when watch-cache is enabled. "+ + "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ + "have system defaults set by heuristics, others default to default-watch-cache-size") + + fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, + "The storage backend for persistence. 
Options: 'etcd3' (default).") + + dummyCacheSize := 0 + fs.IntVar(&dummyCacheSize, "deserialization-cache-size", 0, "Number of deserialized json objects to cache in memory.") + fs.MarkDeprecated("deserialization-cache-size", "the deserialization cache was dropped in 1.13 with support for etcd2") + + fs.StringSliceVar(&s.StorageConfig.Transport.ServerList, "etcd-servers", s.StorageConfig.Transport.ServerList, + "List of etcd servers to connect with (scheme://ip:port), comma separated.") + + fs.StringVar(&s.StorageConfig.Prefix, "etcd-prefix", s.StorageConfig.Prefix, + "The prefix to prepend to all resource paths in etcd.") + + fs.StringVar(&s.StorageConfig.Transport.KeyFile, "etcd-keyfile", s.StorageConfig.Transport.KeyFile, + "SSL key file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.CertFile, "etcd-certfile", s.StorageConfig.Transport.CertFile, + "SSL certification file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.TrustedCAFile, "etcd-cafile", s.StorageConfig.Transport.TrustedCAFile, + "SSL Certificate Authority file used to secure etcd communication.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.MarkDeprecated("experimental-encryption-provider-config", "use --encryption-provider-config.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + + fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, + "The interval of compaction requests. If 0, the compaction request from apiserver is disabled.") + + fs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, "etcd-count-metric-poll-period", s.StorageConfig.CountMetricPollPeriod, ""+ + "Frequency of polling etcd for number of resources per type. 0 disables the metric collection.") + + fs.DurationVar(&s.StorageConfig.DBMetricPollInterval, "etcd-db-metric-poll-interval", s.StorageConfig.DBMetricPollInterval, + "The interval of requests to poll etcd and update metric. 
0 disables the metric collection") + + fs.DurationVar(&s.StorageConfig.HealthcheckTimeout, "etcd-healthcheck-timeout", s.StorageConfig.HealthcheckTimeout, + "The timeout to use when checking etcd health.") +} + +func (s *EtcdOptions) ApplyTo(c *server.Config) error { + if s == nil { + return nil + } + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + c.RESTOptionsGetter = &SimpleRestOptionsFactory{Options: *s} + return nil +} + +func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error { + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} + return nil +} + +func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { + healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig) + if err != nil { + return err + } + c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error { + return healthCheck() + })) + + if s.EncryptionProviderConfigFilepath != "" { + kmsPluginHealthzChecks, err := encryptionconfig.GetKMSPluginHealthzCheckers(s.EncryptionProviderConfigFilepath) + if err != nil { + return err + } + c.AddHealthChecks(kmsPluginHealthzChecks...) + } + + return nil +} + +type SimpleRestOptionsFactory struct { + Options EtcdOptions +} + +func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + ret := generic.RESTOptions{ + StorageConfig: &f.Options.StorageConfig, + Decorator: generic.UndecoratedStorage, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + ResourcePrefix: resource.Group + "/" + resource.Resource, + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = genericregistry.StorageWithCacher() + } + } + return ret, nil +} + +type StorageFactoryRestOptionsFactory struct { + Options EtcdOptions + StorageFactory serverstorage.StorageFactory +} + +func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + storageConfig, err := f.StorageFactory.NewConfig(resource) + if err != nil { + return generic.RESTOptions{}, fmt.Errorf("unable to find storage destination for %v, due to %v", resource, err.Error()) + } + + ret := generic.RESTOptions{ + StorageConfig: storageConfig, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = 
genericregistry.StorageWithCacher() + } + } + + return ret, nil +} + +// ParseWatchCacheSizes turns a list of cache size values into a map of group resources +// to requested sizes. +func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { + watchCacheSizes := make(map[schema.GroupResource]int) + for _, c := range cacheSizes { + tokens := strings.Split(c, "#") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid value of watch cache size: %s", c) + } + + size, err := strconv.Atoi(tokens[1]) + if err != nil { + return nil, fmt.Errorf("invalid size of watch cache size: %s", c) + } + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) + } + watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size + } + return watchCacheSizes, nil +} + +// WriteWatchCacheSizes turns a map of cache size values into a list of string specifications. +func WriteWatchCacheSizes(watchCacheSizes map[schema.GroupResource]int) ([]string, error) { + var cacheSizes []string + + for resource, size := range watchCacheSizes { + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative for resource %s", resource) + } + cacheSizes = append(cacheSizes, fmt.Sprintf("%s#%d", resource.String(), size)) + } + return cacheSizes, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go new file mode 100644 index 000000000..235635ea9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/server" +) + +type FeatureOptions struct { + EnableProfiling bool + EnableContentionProfiling bool +} + +func NewFeatureOptions() *FeatureOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + + return &FeatureOptions{ + EnableProfiling: defaults.EnableProfiling, + EnableContentionProfiling: defaults.EnableContentionProfiling, + } +} + +func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.BoolVar(&o.EnableProfiling, "profiling", o.EnableProfiling, + "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", o.EnableContentionProfiling, + "Enable lock contention profiling, if profiling is enabled") + dummy := false + fs.BoolVar(&dummy, "enable-swagger-ui", dummy, "Enables swagger ui on the apiserver at /swagger-ui") + fs.MarkDeprecated("enable-swagger-ui", "swagger 1.2 support has been removed") +} + +func (o *FeatureOptions) ApplyTo(c *server.Config) error { + if o == nil { + return nil + } + + c.EnableProfiling = o.EnableProfiling + c.EnableContentionProfiling = o.EnableContentionProfiling + + return nil +} + +func (o *FeatureOptions) Validate() []error { + if o == nil { + return nil + } + + errs := []error{} + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go new file mode 100644 index 000000000..18824083e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" +) + +// RecommendedOptions contains the recommended options for running an API server. +// If you add something to this list, it should be in a logical grouping. +// Each of them can be nil to leave the feature unconfigured on ApplyTo. +type RecommendedOptions struct { + Etcd *EtcdOptions + SecureServing *SecureServingOptionsWithLoopback + Authentication *DelegatingAuthenticationOptions + Authorization *DelegatingAuthorizationOptions + Audit *AuditOptions + Features *FeatureOptions + CoreAPI *CoreAPIOptions + + // FeatureGate is a way to plumb feature gate through if you have them. + FeatureGate featuregate.FeatureGate + // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned + // admission plugin initializers to Admission.ApplyTo. 
+ ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) + Admission *AdmissionOptions + // API Server Egress Selector is used to control outbound traffic from the API Server + EgressSelector *EgressSelectorOptions +} + +func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { + sso := NewSecureServingOptions() + + // We are composing recommended options for an aggregated api-server, + // whose client is typically a proxy multiplexing many operations --- + // notably including long-running ones --- into one HTTP/2 connection + // into this server. So allow many concurrent operations. + sso.HTTP2MaxStreamsPerConnection = 1000 + + return &RecommendedOptions{ + Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), + SecureServing: sso.WithLoopback(), + Authentication: NewDelegatingAuthenticationOptions(), + Authorization: NewDelegatingAuthorizationOptions(), + Audit: NewAuditOptions(), + Features: NewFeatureOptions(), + CoreAPI: NewCoreAPIOptions(), + // Wired a global by default that sadly people will abuse to have different meanings in different repos. + // Please consider creating your own FeatureGate so you can have a consistent meaning for what a variable contains + // across different repos. Future you will thank you. + FeatureGate: feature.DefaultFeatureGate, + ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil }, + Admission: NewAdmissionOptions(), + EgressSelector: NewEgressSelectorOptions(), + } +} + +func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { + o.Etcd.AddFlags(fs) + o.SecureServing.AddFlags(fs) + o.Authentication.AddFlags(fs) + o.Authorization.AddFlags(fs) + o.Audit.AddFlags(fs) + o.Features.AddFlags(fs) + o.CoreAPI.AddFlags(fs) + o.Admission.AddFlags(fs) + o.EgressSelector.AddFlags(fs) +} + +// ApplyTo adds RecommendedOptions to the server configuration. +// pluginInitializers can be empty, it is only need for additional initializers. 
+func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { + if err := o.Etcd.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.SecureServing.ApplyTo(&config.Config.SecureServing, &config.Config.LoopbackClientConfig); err != nil { + return err + } + if err := o.Authentication.ApplyTo(&config.Config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { + return err + } + if err := o.Authorization.ApplyTo(&config.Config.Authorization); err != nil { + return err + } + if err := o.Audit.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.Features.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.CoreAPI.ApplyTo(config); err != nil { + return err + } + if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { + return err + } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate, initializers...); err != nil { + return err + } + if err := o.EgressSelector.ApplyTo(&config.Config); err != nil { + return err + } + if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + config.FlowControl = utilflowcontrol.New( + config.SharedInformerFactory, + kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta1(), + config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, + config.RequestTimeout/4, + ) + } + return nil +} + +func (o *RecommendedOptions) Validate() []error { + errors := []error{} + errors = append(errors, o.Etcd.Validate()...) + errors = append(errors, o.SecureServing.Validate()...) + errors = append(errors, o.Authentication.Validate()...) + errors = append(errors, o.Authorization.Validate()...) + errors = append(errors, o.Audit.Validate()...) + errors = append(errors, o.Features.Validate()...) + errors = append(errors, o.CoreAPI.Validate()...) + errors = append(errors, o.Admission.Validate()...) + errors = append(errors, o.EgressSelector.Validate()...) + + return errors +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go new file mode 100644 index 000000000..1ee706042 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -0,0 +1,215 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" + + "github.com/spf13/pflag" +) + +// ServerRunOptions contains the options while running a generic api server. 
+type ServerRunOptions struct { + AdvertiseAddress net.IP + + CorsAllowedOriginList []string + ExternalHost string + MaxRequestsInFlight int + MaxMutatingRequestsInFlight int + RequestTimeout time.Duration + GoawayChance float64 + LivezGracePeriod time.Duration + MinRequestTimeout int + ShutdownDelayDuration time.Duration + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + JSONPatchMaxCopyBytes int64 + // The limit on the request body size that would be accepted and + // decoded in a write request. 0 means no limit. + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + MaxRequestBodyBytes int64 + EnablePriorityAndFairness bool +} + +func NewServerRunOptions() *ServerRunOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + return &ServerRunOptions{ + MaxRequestsInFlight: defaults.MaxRequestsInFlight, + MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight, + RequestTimeout: defaults.RequestTimeout, + LivezGracePeriod: defaults.LivezGracePeriod, + MinRequestTimeout: defaults.MinRequestTimeout, + ShutdownDelayDuration: defaults.ShutdownDelayDuration, + JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, + MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, + EnablePriorityAndFairness: true, + } +} + +// ApplyTo applies the run options to the method receiver and returns self +func (s *ServerRunOptions) ApplyTo(c *server.Config) error { + c.CorsAllowedOriginList = s.CorsAllowedOriginList + c.ExternalAddress = s.ExternalHost + c.MaxRequestsInFlight = s.MaxRequestsInFlight + c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight + c.LivezGracePeriod = s.LivezGracePeriod + c.RequestTimeout = s.RequestTimeout + c.GoawayChance = s.GoawayChance + c.MinRequestTimeout = s.MinRequestTimeout + c.ShutdownDelayDuration = s.ShutdownDelayDuration + c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes + c.MaxRequestBodyBytes = s.MaxRequestBodyBytes + c.PublicAddress = s.AdvertiseAddress + + return nil +} + +// DefaultAdvertiseAddress sets the field AdvertiseAddress if unset. The field will be set based on the SecureServingOptions. +func (s *ServerRunOptions) DefaultAdvertiseAddress(secure *SecureServingOptions) error { + if secure == nil { + return nil + } + + if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() { + hostIP, err := secure.DefaultExternalAddress() + if err != nil { + return fmt.Errorf("Unable to find suitable network address.error='%v'. 
"+ + "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err) + } + s.AdvertiseAddress = hostIP + } + + return nil +} + +// Validate checks validation of ServerRunOptions +func (s *ServerRunOptions) Validate() []error { + errors := []error{} + + if s.LivezGracePeriod < 0 { + errors = append(errors, fmt.Errorf("--livez-grace-period can not be a negative value")) + } + + if s.MaxRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value")) + } + if s.MaxMutatingRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value")) + } + + if s.RequestTimeout.Nanoseconds() < 0 { + errors = append(errors, fmt.Errorf("--request-timeout can not be negative value")) + } + + if s.GoawayChance < 0 || s.GoawayChance > 0.02 { + errors = append(errors, fmt.Errorf("--goaway-chance can not be less than 0 or greater than 0.02")) + } + + if s.MinRequestTimeout < 0 { + errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value")) + } + + if s.ShutdownDelayDuration < 0 { + errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value")) + } + + if s.JSONPatchMaxCopyBytes < 0 { + errors = append(errors, fmt.Errorf("--json-patch-max-copy-bytes can not be negative value")) + } + + if s.MaxRequestBodyBytes < 0 { + errors = append(errors, fmt.Errorf("--max-resource-write-bytes can not be negative value")) + } + + return errors +} + +// AddUniversalFlags adds flags for a specific APIServer to the specified FlagSet +func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { + // Note: the weird ""+ in below lines seems to be the only way to get gofmt to + // arrange these text blocks sensibly. Grrr. + + fs.IPVar(&s.AdvertiseAddress, "advertise-address", s.AdvertiseAddress, ""+ + "The IP address on which to advertise the apiserver to members of the cluster. This "+ + "address must be reachable by the rest of the cluster. If blank, the --bind-address "+ + "will be used. If --bind-address is unspecified, the host's default interface will "+ + "be used.") + + fs.StringSliceVar(&s.CorsAllowedOriginList, "cors-allowed-origins", s.CorsAllowedOriginList, ""+ + "List of allowed origins for CORS, comma separated. An allowed origin can be a regular "+ + "expression to support subdomain matching. If this list is empty CORS will not be enabled.") + + deprecatedTargetRAMMB := 0 + fs.IntVar(&deprecatedTargetRAMMB, "target-ram-mb", deprecatedTargetRAMMB, + "DEPRECATED: Memory limit for apiserver in MB (used to configure sizes of caches, etc.)") + fs.MarkDeprecated("target-ram-mb", "This flag will be removed in v1.23") + + fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost, + "The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).") + + deprecatedMasterServiceNamespace := metav1.NamespaceDefault + fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+ + "DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.") + + fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+ + "The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, "+ + "it rejects requests. 
Zero for no limit.") + + fs.IntVar(&s.MaxMutatingRequestsInFlight, "max-mutating-requests-inflight", s.MaxMutatingRequestsInFlight, ""+ + "The maximum number of mutating requests in flight at a given time. When the server exceeds this, "+ + "it rejects requests. Zero for no limit.") + + fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+ + "An optional field indicating the duration a handler must keep a request open before timing "+ + "it out. This is the default request timeout for requests but may be overridden by flags such as "+ + "--min-request-timeout for specific types of requests.") + + fs.Float64Var(&s.GoawayChance, "goaway-chance", s.GoawayChance, ""+ + "To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). "+ + "The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. "+ + "This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. "+ + "Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point.") + + fs.DurationVar(&s.LivezGracePeriod, "livez-grace-period", s.LivezGracePeriod, ""+ + "This option represents the maximum amount of time it should take for apiserver to complete its startup sequence "+ + "and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume "+ + "that unfinished post-start hooks will complete successfully and therefore return true.") + + fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+ + "An optional field indicating the minimum number of seconds a handler must keep "+ + "a request open before timing it out. Currently only honored by the watch request "+ + "handler, which picks a randomized value above this number as the connection timeout, "+ + "to spread out load.") + + fs.BoolVar(&s.EnablePriorityAndFairness, "enable-priority-and-fairness", s.EnablePriorityAndFairness, ""+ + "If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") + + fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ + "Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez "+ + "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ + "has elapsed. This can be used to allow load balancer to stop sending traffic to this server.") + + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go new file mode 100644 index 000000000..0dcbbb738 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -0,0 +1,356 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "context" + "fmt" + "net" + "path" + "strconv" + "strings" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + cliflag "k8s.io/component-base/cli/flag" +) + +type SecureServingOptions struct { + BindAddress net.IP + // BindPort is ignored when Listener is set, will serve https even with 0. + BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + // Required set to true means that BindPort cannot be zero. + Required bool + // ExternalAddress is the address advertised, even if BindAddress is a loopback. By default this + // is set to BindAddress if the later no loopback, or to the first host interface address. + ExternalAddress net.IP + + // Listener is the secure server network listener. + // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener + + // ServerCert is the TLS cert info for serving secure traffic + ServerCert GeneratableKeyCert + // SNICertKeys are named CertKeys for serving secure traffic with SNI support. + SNICertKeys []cliflag.NamedCertKey + // CipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + CipherSuites []string + // MinTLSVersion is the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + MinTLSVersion string + + // HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client. + // A value of zero means to use the default provided by golang's HTTP/2 support. + HTTP2MaxStreamsPerConnection int + + // PermitPortSharing controls if SO_REUSEPORT is used when binding the port, which allows + // more than one instance to bind on the same address and port. + PermitPortSharing bool +} + +type CertKey struct { + // CertFile is a file containing a PEM-encoded certificate, and possibly the complete certificate chain + CertFile string + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string +} + +type GeneratableKeyCert struct { + // CertKey allows setting an explicit cert/key file to use. + CertKey CertKey + + // CertDirectory specifies a directory to write generated certificates to if CertFile/KeyFile aren't explicitly set. + // PairName is used to determine the filenames within CertDirectory. + // If CertDirectory and PairName are not set, an in-memory certificate will be generated. + CertDirectory string + // PairName is the name which will be used with CertDirectory to make a cert and key filenames. 
+ // It becomes CertDirectory/PairName.crt and CertDirectory/PairName.key + PairName string + + // GeneratedCert holds an in-memory generated certificate if CertFile/KeyFile aren't explicitly set, and CertDirectory/PairName are not set. + GeneratedCert dynamiccertificates.CertKeyContentProvider + + // FixtureDirectory is a directory that contains test fixture used to avoid regeneration of certs during tests. + // The format is: + // _-_-.crt + // _-_-.key + FixtureDirectory string +} + +func NewSecureServingOptions() *SecureServingOptions { + return &SecureServingOptions{ + BindAddress: net.ParseIP("0.0.0.0"), + BindPort: 443, + ServerCert: GeneratableKeyCert{ + PairName: "apiserver", + CertDirectory: "apiserver.local.config/certificates", + }, + } +} + +func (s *SecureServingOptions) DefaultExternalAddress() (net.IP, error) { + if s.ExternalAddress != nil && !s.ExternalAddress.IsUnspecified() { + return s.ExternalAddress, nil + } + return utilnet.ResolveBindAddress(s.BindAddress) +} + +func (s *SecureServingOptions) Validate() []error { + if s == nil { + return nil + } + + errors := []error{} + + if s.Required && s.BindPort < 1 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 1 and 65535, inclusive. It cannot be turned off with 0", s.BindPort)) + } else if s.BindPort < 0 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 0 and 65535, inclusive. 0 for turning off secure port", s.BindPort)) + } + + if (len(s.ServerCert.CertKey.CertFile) != 0 || len(s.ServerCert.CertKey.KeyFile) != 0) && s.ServerCert.GeneratedCert != nil { + errors = append(errors, fmt.Errorf("cert/key file and in-memory certificate cannot both be set")) + } + + return errors +} + +func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+ + "The IP address on which to listen for the --secure-port port. The "+ + "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ + "clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.") + + desc := "The port on which to serve HTTPS with authentication and authorization." + if s.Required { + desc += " It cannot be switched off with 0." + } else { + desc += " If 0, don't serve HTTPS at all." + } + fs.IntVar(&s.BindPort, "secure-port", s.BindPort, desc) + + fs.StringVar(&s.ServerCert.CertDirectory, "cert-dir", s.ServerCert.CertDirectory, ""+ + "The directory where the TLS certs are located. "+ + "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") + + fs.StringVar(&s.ServerCert.CertKey.CertFile, "tls-cert-file", s.ServerCert.CertKey.CertFile, ""+ + "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated "+ + "after server cert). 
If HTTPS serving is enabled, and --tls-cert-file and "+ + "--tls-private-key-file are not provided, a self-signed certificate and key "+ + "are generated for the public address and saved to the directory specified by --cert-dir.") + + fs.StringVar(&s.ServerCert.CertKey.KeyFile, "tls-private-key-file", s.ServerCert.CertKey.KeyFile, + "File containing the default x509 private key matching --tls-cert-file.") + + tlsCipherPreferredValues := cliflag.PreferredTLSCipherNames() + tlsCipherInsecureValues := cliflag.InsecureTLSCipherNames() + fs.StringSliceVar(&s.CipherSuites, "tls-cipher-suites", s.CipherSuites, + "Comma-separated list of cipher suites for the server. "+ + "If omitted, the default Go cipher suites will be used. \n"+ + "Preferred values: "+strings.Join(tlsCipherPreferredValues, ", ")+". \n"+ + "Insecure values: "+strings.Join(tlsCipherInsecureValues, ", ")+".") + + tlsPossibleVersions := cliflag.TLSPossibleVersions() + fs.StringVar(&s.MinTLSVersion, "tls-min-version", s.MinTLSVersion, + "Minimum TLS version supported. "+ + "Possible values: "+strings.Join(tlsPossibleVersions, ", ")) + + fs.Var(cliflag.NewNamedCertKeyArray(&s.SNICertKeys), "tls-sni-cert-key", ""+ + "A pair of x509 certificate and private key file paths, optionally suffixed with a list of "+ + "domain patterns which are fully qualified domain names, possibly with prefixed wildcard "+ + "segments. The domain patterns also allow IP addresses, but IPs should only be used if "+ + "the apiserver has visibility to the IP address requested by a client. "+ + "If no domain patterns are provided, the names of the certificate are "+ + "extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns "+ + "trump over extracted names. For multiple key/certificate pairs, use the "+ + "--tls-sni-cert-key multiple times. "+ + "Examples: \"example.crt,example.key\" or \"foo.crt,foo.key:*.foo.com,foo.com\".") + + fs.IntVar(&s.HTTP2MaxStreamsPerConnection, "http2-max-streams-per-connection", s.HTTP2MaxStreamsPerConnection, ""+ + "The limit that the server gives to clients for "+ + "the maximum number of streams in an HTTP/2 connection. "+ + "Zero means to use golang's default.") + + fs.BoolVar(&s.PermitPortSharing, "permit-port-sharing", s.PermitPortSharing, + "If true, SO_REUSEPORT will be used when binding the port, which allows "+ + "more than one instance to bind on the same address and port. [default=false]") +} + +// ApplyTo fills up serving information in the server configuration. 
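Before ApplyTo below, a minimal sketch of how a caller typically drives these serving options end to end (the same New/AddFlags/Validate/ApplyTo pattern that ServerRunOptions follows earlier in this patch). The command name, flag values, and fatal error handling are illustrative assumptions, not part of the vendored file:

    package main

    import (
        "github.com/spf13/pflag"
        "k8s.io/apiserver/pkg/server"
        "k8s.io/apiserver/pkg/server/options"
        "k8s.io/klog/v2"
    )

    func main() {
        opts := options.NewSecureServingOptions()

        fs := pflag.NewFlagSet("example-server", pflag.ExitOnError)
        opts.AddFlags(fs)
        // Hypothetical flag values; any flag registered by AddFlags could be set here.
        _ = fs.Parse([]string{"--secure-port=8443", "--cert-dir=/tmp/example-certs"})

        if errs := opts.Validate(); len(errs) > 0 {
            klog.Fatalf("invalid serving options: %v", errs)
        }

        // Fall back to an on-disk self-signed certificate when no --tls-cert-file
        // and --tls-private-key-file are given.
        if err := opts.MaybeDefaultWithSelfSignedCerts("localhost", nil, nil); err != nil {
            klog.Fatal(err)
        }

        var servingInfo *server.SecureServingInfo
        if err := opts.ApplyTo(&servingInfo); err != nil {
            klog.Fatal(err)
        }
        klog.Infof("secure listener bound to %s", servingInfo.Listener.Addr())
    }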
+func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 && s.Listener == nil { + return nil + } + + if s.Listener == nil { + var err error + addr := net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.BindPort)) + + c := net.ListenConfig{} + + if s.PermitPortSharing { + c.Control = permitPortReuse + } + + s.Listener, s.BindPort, err = CreateListener(s.BindNetwork, addr, c) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } else { + if _, ok := s.Listener.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("failed to parse ip and port from listener") + } + s.BindPort = s.Listener.Addr().(*net.TCPAddr).Port + s.BindAddress = s.Listener.Addr().(*net.TCPAddr).IP + } + + *config = &server.SecureServingInfo{ + Listener: s.Listener, + HTTP2MaxStreamsPerConnection: s.HTTP2MaxStreamsPerConnection, + } + c := *config + + serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile + // load main cert + if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { + var err error + c.Cert, err = dynamiccertificates.NewDynamicServingContentFromFiles("serving-cert", serverCertFile, serverKeyFile) + if err != nil { + return err + } + } else if s.ServerCert.GeneratedCert != nil { + c.Cert = s.ServerCert.GeneratedCert + } + + if len(s.CipherSuites) != 0 { + cipherSuites, err := cliflag.TLSCipherSuites(s.CipherSuites) + if err != nil { + return err + } + c.CipherSuites = cipherSuites + } + + var err error + c.MinTLSVersion, err = cliflag.TLSVersion(s.MinTLSVersion) + if err != nil { + return err + } + + // load SNI certs + namedTLSCerts := make([]dynamiccertificates.SNICertKeyContentProvider, 0, len(s.SNICertKeys)) + for _, nck := range s.SNICertKeys { + tlsCert, err := dynamiccertificates.NewDynamicSNIContentFromFiles("sni-serving-cert", nck.CertFile, nck.KeyFile, nck.Names...) 
+ namedTLSCerts = append(namedTLSCerts, tlsCert) + if err != nil { + return fmt.Errorf("failed to load SNI cert and key: %v", err) + } + } + c.SNICerts = namedTLSCerts + + return nil +} + +func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress string, alternateDNS []string, alternateIPs []net.IP) error { + if s == nil || (s.BindPort == 0 && s.Listener == nil) { + return nil + } + keyCert := &s.ServerCert.CertKey + if len(keyCert.CertFile) != 0 || len(keyCert.KeyFile) != 0 { + return nil + } + + canReadCertAndKey := false + if len(s.ServerCert.CertDirectory) > 0 { + if len(s.ServerCert.PairName) == 0 { + return fmt.Errorf("PairName is required if CertDirectory is set") + } + keyCert.CertFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".crt") + keyCert.KeyFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".key") + if canRead, err := certutil.CanReadCertAndKey(keyCert.CertFile, keyCert.KeyFile); err != nil { + return err + } else { + canReadCertAndKey = canRead + } + } + + if !canReadCertAndKey { + // add either the bind address or localhost to the valid alternates + if s.BindAddress.IsUnspecified() { + alternateDNS = append(alternateDNS, "localhost") + } else { + alternateIPs = append(alternateIPs, s.BindAddress) + } + + if cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures(publicAddress, alternateIPs, alternateDNS, s.ServerCert.FixtureDirectory); err != nil { + return fmt.Errorf("unable to generate self signed cert: %v", err) + } else if len(keyCert.CertFile) > 0 && len(keyCert.KeyFile) > 0 { + if err := certutil.WriteCert(keyCert.CertFile, cert); err != nil { + return err + } + if err := keyutil.WriteKey(keyCert.KeyFile, key); err != nil { + return err + } + klog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + } else { + s.ServerCert.GeneratedCert, err = dynamiccertificates.NewStaticCertKeyContent("Generated self signed cert", cert, key) + if err != nil { + return err + } + klog.Infof("Generated self-signed cert in-memory") + } + } + + return nil +} + +func CreateListener(network, addr string, config net.ListenConfig) (net.Listener, int, error) { + if len(network) == 0 { + network = "tcp" + } + + ln, err := config.Listen(context.TODO(), network, addr) + if err != nil { + return nil, 0, fmt.Errorf("failed to listen on %v: %v", addr, err) + } + + // get port + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + ln.Close() + return nil, 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String()) + } + + return ln, tcpAddr.Port, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go new file mode 100644 index 000000000..221a5474b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func permitPortReuse(network, addr string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go new file mode 100644 index 000000000..194189023 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go @@ -0,0 +1,30 @@ +// +build windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "syscall" +) + +// Windows only supports SO_REUSEADDR, which may cause undefined behavior, as +// there is no protection against port hijacking. +func permitPortReuse(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Windows") +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go new file mode 100644 index 000000000..9f9a42f81 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/google/uuid" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" +) + +type SecureServingOptionsWithLoopback struct { + *SecureServingOptions +} + +func (o *SecureServingOptions) WithLoopback() *SecureServingOptionsWithLoopback { + return &SecureServingOptionsWithLoopback{o} +} + +// ApplyTo fills up serving information in the server configuration. +func (s *SecureServingOptionsWithLoopback) ApplyTo(secureServingInfo **server.SecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.SecureServingOptions == nil || secureServingInfo == nil { + return nil + } + + if err := s.SecureServingOptions.ApplyTo(secureServingInfo); err != nil { + return err + } + + if *secureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and + // let the server return it when the loopback client connects. 
+ certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + certProvider, err := dynamiccertificates.NewStaticSNICertKeyContent("self-signed loopback", certPem, keyPem, server.LoopbackClientServerNameOverride) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + + secureLoopbackClientConfig, err := (*secureServingInfo).NewLoopbackClientConfig(uuid.New().String(), certPem) + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + // Write to the front of SNICerts so that this overrides any other certs with the same name + (*secureServingInfo).SNICerts = append([]dynamiccertificates.SNICertKeyContentProvider{certProvider}, (*secureServingInfo).SNICerts...) + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/plugins.go b/vendor/k8s.io/apiserver/pkg/server/plugins.go new file mode 100644 index 000000000..7a9094337 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/plugins.go @@ -0,0 +1,32 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +// This file exists to force the desired plugin implementations to be linked into genericapi pkg. +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" +) + +// RegisterAllAdmissionPlugins registers all admission plugins +func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { + lifecycle.Register(plugins) + validatingwebhook.Register(plugins) + mutatingwebhook.Register(plugins) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go new file mode 100644 index 000000000..0dae21535 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package resourceconfig contains the resource config related helper functions. +package resourceconfig // import "k8s.io/apiserver/pkg/server/resourceconfig" diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go new file mode 100644 index 000000000..bfcce54b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go @@ -0,0 +1,201 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourceconfig + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serverstore "k8s.io/apiserver/pkg/server/storage" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" +) + +// GroupVersionRegistry provides access to registered group versions. +type GroupVersionRegistry interface { + // IsGroupRegistered returns true if given group is registered. + IsGroupRegistered(group string) bool + // IsVersionRegistered returns true if given version is registered. + IsVersionRegistered(v schema.GroupVersion) bool + // PrioritizedVersionsAllGroups returns all registered group versions. + PrioritizedVersionsAllGroups() []schema.GroupVersion +} + +// MergeResourceEncodingConfigs merges the given defaultResourceConfig with specific GroupVersionResource overrides. 
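Before the two merge helpers below, a self-contained sketch of how MergeAPIResourceConfigs (defined later in this file) is typically exercised. The toyRegistry type, the group/version names, and the override map are invented for illustration; real servers pass a *runtime.Scheme, which satisfies the GroupVersionRegistry interface above:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apiserver/pkg/server/resourceconfig"
        serverstore "k8s.io/apiserver/pkg/server/storage"
        cliflag "k8s.io/component-base/cli/flag"
    )

    // toyRegistry is a hand-rolled GroupVersionRegistry used only for this sketch.
    type toyRegistry struct{ versions []schema.GroupVersion }

    func (r toyRegistry) IsGroupRegistered(group string) bool {
        for _, gv := range r.versions {
            if gv.Group == group {
                return true
            }
        }
        return false
    }

    func (r toyRegistry) IsVersionRegistered(v schema.GroupVersion) bool {
        for _, gv := range r.versions {
            if gv == v {
                return true
            }
        }
        return false
    }

    func (r toyRegistry) PrioritizedVersionsAllGroups() []schema.GroupVersion { return r.versions }

    func main() {
        appsV1 := schema.GroupVersion{Group: "apps", Version: "v1"}
        batchV2alpha1 := schema.GroupVersion{Group: "batch", Version: "v2alpha1"}

        defaults := serverstore.NewResourceConfig()
        defaults.EnableVersions(appsV1)

        // The command-line equivalent would be --runtime-config=api/alpha=false,batch/v2alpha1=true.
        overrides := cliflag.ConfigurationMap{"api/alpha": "false", "batch/v2alpha1": "true"}

        cfg, err := resourceconfig.MergeAPIResourceConfigs(defaults, overrides,
            toyRegistry{versions: []schema.GroupVersion{appsV1, batchV2alpha1}})
        if err != nil {
            panic(err)
        }
        // The explicit per-group/version override wins over the api/alpha=false wildcard.
        fmt.Println(cfg.VersionEnabled(appsV1), cfg.VersionEnabled(batchV2alpha1)) // true true
    }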
+func MergeResourceEncodingConfigs( + defaultResourceEncoding *serverstore.DefaultResourceEncodingConfig, + resourceEncodingOverrides []schema.GroupVersionResource, +) *serverstore.DefaultResourceEncodingConfig { + resourceEncodingConfig := defaultResourceEncoding + for _, gvr := range resourceEncodingOverrides { + resourceEncodingConfig.SetResourceEncoding(gvr.GroupResource(), gvr.GroupVersion(), + schema.GroupVersion{Group: gvr.Group, Version: runtime.APIVersionInternal}) + } + return resourceEncodingConfig +} + +// Recognized values for the --runtime-config parameter to enable/disable groups of APIs +const ( + APIAll = "api/all" + APIGA = "api/ga" + APIBeta = "api/beta" + APIAlpha = "api/alpha" +) + +var ( + gaPattern = regexp.MustCompile(`^v\d+$`) + betaPattern = regexp.MustCompile(`^v\d+beta\d+$`) + alphaPattern = regexp.MustCompile(`^v\d+alpha\d+$`) + + matchers = map[string]func(gv schema.GroupVersion) bool{ + // allows users to address all api versions + APIAll: func(gv schema.GroupVersion) bool { return true }, + // allows users to address all api versions in the form v[0-9]+ + APIGA: func(gv schema.GroupVersion) bool { return gaPattern.MatchString(gv.Version) }, + // allows users to address all beta api versions + APIBeta: func(gv schema.GroupVersion) bool { return betaPattern.MatchString(gv.Version) }, + // allows users to address all alpha api versions + APIAlpha: func(gv schema.GroupVersion) bool { return alphaPattern.MatchString(gv.Version) }, + } + + matcherOrder = []string{APIAll, APIGA, APIBeta, APIAlpha} +) + +// MergeAPIResourceConfigs merges the given defaultAPIResourceConfig with the given resourceConfigOverrides. +// Exclude the groups not registered in registry, and check if version is +// not registered in group, then it will fail. +func MergeAPIResourceConfigs( + defaultAPIResourceConfig *serverstore.ResourceConfig, + resourceConfigOverrides cliflag.ConfigurationMap, + registry GroupVersionRegistry, +) (*serverstore.ResourceConfig, error) { + resourceConfig := defaultAPIResourceConfig + overrides := resourceConfigOverrides + + for _, flag := range matcherOrder { + if value, ok := overrides[flag]; ok { + if value == "false" { + resourceConfig.DisableMatchingVersions(matchers[flag]) + } else if value == "true" { + resourceConfig.EnableMatchingVersions(matchers[flag]) + } else { + return nil, fmt.Errorf("invalid value %v=%v", flag, value) + } + } + } + + // "={true|false} allows users to enable/disable API. + // This takes preference over api/all, if specified. + // Iterate through all group/version overrides specified in runtimeConfig. + for key := range overrides { + // Have already handled them above. Can skip them here. + if _, ok := matchers[key]; ok { + continue + } + + tokens := strings.Split(key, "/") + if len(tokens) < 2 { + continue + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("invalid key %s", key) + } + + // individual resource enablement/disablement is only supported in the extensions/v1beta1 API group for legacy reasons. + // all other API groups are expected to contain coherent sets of resources that are enabled/disabled together. 
+ if len(tokens) > 2 && (groupVersion != schema.GroupVersion{Group: "extensions", Version: "v1beta1"}) { + klog.Warningf("ignoring invalid key %s, individual resource enablement/disablement is not supported in %s, and will prevent starting in future releases", key, groupVersion.String()) + continue + } + + // Exclude group not registered into the registry. + if !registry.IsGroupRegistered(groupVersion.Group) { + continue + } + + // Verify that the groupVersion is registered into registry. + if !registry.IsVersionRegistered(groupVersion) { + return nil, fmt.Errorf("group version %s that has not been registered", groupVersion.String()) + } + enabled, err := getRuntimeConfigValue(overrides, key, false) + if err != nil { + return nil, err + } + if enabled { + // enable the groupVersion for "group/version=true" and "group/version/resource=true" + resourceConfig.EnableVersions(groupVersion) + } else if len(tokens) == 2 { + // disable the groupVersion only for "group/version=false", not "group/version/resource=false" + resourceConfig.DisableVersions(groupVersion) + } + + if len(tokens) < 3 { + continue + } + groupVersionResource := groupVersion.WithResource(tokens[2]) + if enabled { + resourceConfig.EnableResources(groupVersionResource) + } else { + resourceConfig.DisableResources(groupVersionResource) + } + } + + return resourceConfig, nil +} + +func getRuntimeConfigValue(overrides cliflag.ConfigurationMap, apiKey string, defaultValue bool) (bool, error) { + flagValue, ok := overrides[apiKey] + if ok { + if flagValue == "" { + return true, nil + } + boolValue, err := strconv.ParseBool(flagValue) + if err != nil { + return false, fmt.Errorf("invalid value of %s: %s, err: %v", apiKey, flagValue, err) + } + return boolValue, nil + } + return defaultValue, nil +} + +// ParseGroups takes in resourceConfig and returns parsed groups. +func ParseGroups(resourceConfig cliflag.ConfigurationMap) ([]string, error) { + groups := []string{} + for key := range resourceConfig { + if _, ok := matchers[key]; ok { + continue + } + tokens := strings.Split(key, "/") + if len(tokens) != 2 && len(tokens) != 3 { + return groups, fmt.Errorf("runtime-config invalid key %s", key) + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("runtime-config invalid key %s", key) + } + groups = append(groups, groupVersion.Group) + } + + return groups, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS b/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS new file mode 100644 index 000000000..4da107c8c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sttts diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/doc.go b/vendor/k8s.io/apiserver/pkg/server/routes/doc.go new file mode 100644 index 000000000..603e899cd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package routes holds a collection of optional genericapiserver http handlers. +package routes // import "k8s.io/apiserver/pkg/server/routes" diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/flags.go b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go new file mode 100644 index 000000000..55835a6e8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "fmt" + "html/template" + "io/ioutil" + "net/http" + "path" + "sync" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/server/mux" +) + +var ( + lock = &sync.RWMutex{} + registeredFlags = map[string]debugFlag{} +) + +// DebugFlags adds handlers for flags under /debug/flags. +type DebugFlags struct { +} + +// Install registers the APIServer's flags handler. +func (f DebugFlags) Install(c *mux.PathRecorderMux, flag string, handler func(http.ResponseWriter, *http.Request)) { + c.UnlistedHandle("/debug/flags", http.HandlerFunc(f.Index)) + c.UnlistedHandlePrefix("/debug/flags/", http.HandlerFunc(f.Index)) + + url := path.Join("/debug/flags", flag) + c.UnlistedHandleFunc(url, handler) + + f.addFlag(flag) +} + +// Index responds with the `/debug/flags` request. +// For example, "/debug/flags/v" serves the "--v" flag. +// Index responds to a request for "/debug/flags/" with an HTML page +// listing the available flags. +func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { + lock.RLock() + defer lock.RUnlock() + if err := indexTmpl.Execute(w, registeredFlags); err != nil { + klog.Error(err) + } +} + +var indexTmpl = template.Must(template.New("index").Parse(` + +/debug/flags/ + + +/debug/flags/
+<br>
+flags:<br>
+<table>
+{{range .}}
+{{.Flag}}
+<br>
+{{end}}
+</table>
+<br>
+full flags configurable<br>
+ + +`)) + +type debugFlag struct { + Flag string +} + +func (f DebugFlags) addFlag(flag string) { + lock.Lock() + defer lock.Unlock() + registeredFlags[flag] = debugFlag{flag} +} + +// StringFlagSetterFunc is a func used for setting string type flag. +type StringFlagSetterFunc func(string) (string, error) + +// StringFlagPutHandler wraps an http Handler to set string type flag. +func StringFlagPutHandler(setter StringFlagSetterFunc) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + switch { + case req.Method == "PUT": + body, err := ioutil.ReadAll(req.Body) + if err != nil { + writePlainText(http.StatusBadRequest, "error reading request body: "+err.Error(), w) + return + } + defer req.Body.Close() + response, err := setter(string(body)) + if err != nil { + writePlainText(http.StatusBadRequest, err.Error(), w) + return + } + writePlainText(http.StatusOK, response, w) + return + default: + writePlainText(http.StatusNotAcceptable, "unsupported http method", w) + return + } + }) +} + +// writePlainText renders a simple string response. +func writePlainText(statusCode int, text string, w http.ResponseWriter) { + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(statusCode) + fmt.Fprintln(w, text) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/index.go b/vendor/k8s.io/apiserver/pkg/server/routes/index.go new file mode 100644 index 000000000..140757988 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/index.go @@ -0,0 +1,69 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/server/mux" +) + +// ListedPathProvider is an interface for providing paths that should be reported at /. +type ListedPathProvider interface { + // ListedPaths is an alphabetically sorted list of paths to be reported at /. + ListedPaths() []string +} + +// ListedPathProviders is a convenient way to combine multiple ListedPathProviders +type ListedPathProviders []ListedPathProvider + +// ListedPaths unions and sorts the included paths. +func (p ListedPathProviders) ListedPaths() []string { + ret := sets.String{} + for _, provider := range p { + for _, path := range provider.ListedPaths() { + ret.Insert(path) + } + } + + return ret.List() +} + +// Index provides a webservice for the http root / listing all known paths. +type Index struct{} + +// Install adds the Index webservice to the given mux. 
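Before Index.Install below, a hypothetical sketch that wires the DebugFlags handler from flags.go (earlier in this patch) together with Index onto a single PathRecorderMux. The mux name, flag name, and setter are made up; the sketch also assumes the mux's own ListedPaths method can serve as the ListedPathProvider here:

    package main

    import (
        "fmt"
        "net/http"

        "k8s.io/apiserver/pkg/server/mux"
        "k8s.io/apiserver/pkg/server/routes"
    )

    func main() {
        m := mux.NewPathRecorderMux("example-debug")

        // A stand-in setter; real servers usually plug a log-verbosity setter in here.
        setter := func(val string) (string, error) {
            return fmt.Sprintf("example flag set to %q", val), nil
        }
        routes.DebugFlags{}.Install(m, "exampleflag", routes.StringFlagPutHandler(setter))

        // The mux records the paths registered on it, so it can act as the path provider for "/".
        routes.Index{}.Install(m, m)

        // PUT /debug/flags/exampleflag invokes the setter; GET / returns the listed paths as JSON.
        _ = http.ListenAndServe("127.0.0.1:8080", m)
    }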
+func (i Index) Install(pathProvider ListedPathProvider, mux *mux.PathRecorderMux) { + handler := IndexLister{StatusCode: http.StatusOK, PathProvider: pathProvider} + + mux.UnlistedHandle("/", handler) + mux.UnlistedHandle("/index.html", handler) +} + +// IndexLister lists the available indexes with the status code provided +type IndexLister struct { + StatusCode int + PathProvider ListedPathProvider +} + +// ServeHTTP serves the available paths. +func (i IndexLister) ServeHTTP(w http.ResponseWriter, r *http.Request) { + responsewriters.WriteRawJSON(i.StatusCode, metav1.RootPaths{Paths: i.PathProvider.ListedPaths()}, w) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go new file mode 100644 index 000000000..1121e95c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/mux" + etcd3metrics "k8s.io/apiserver/pkg/storage/etcd3/metrics" + flowcontrolmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// DefaultMetrics installs the default prometheus metrics handler +type DefaultMetrics struct{} + +// Install adds the DefaultMetrics handler +func (m DefaultMetrics) Install(c *mux.PathRecorderMux) { + register() + c.Handle("/metrics", legacyregistry.Handler()) +} + +// MetricsWithReset install the prometheus metrics handler extended with support for the DELETE method +// which resets the metrics. +type MetricsWithReset struct{} + +// Install adds the MetricsWithReset handler +func (m MetricsWithReset) Install(c *mux.PathRecorderMux) { + register() + c.Handle("/metrics", legacyregistry.HandlerWithReset()) +} + +// register apiserver and etcd metrics +func register() { + apimetrics.Register() + etcd3metrics.Register() + flowcontrolmetrics.Register() +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go new file mode 100644 index 000000000..c9fb43b95 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package routes + +import ( + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/server/mux" + "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/handler" +) + +// OpenAPI installs spec endpoints for each web service. +type OpenAPI struct { + Config *common.Config +} + +// Install adds the SwaggerUI webservice to the given mux. +func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) { + spec, err := builder.BuildOpenAPISpec(c.RegisteredWebServices(), oa.Config) + if err != nil { + klog.Fatalf("Failed to build open api spec for root: %v", err) + } + spec.Definitions = handler.PruneDefaults(spec.Definitions) + openAPIVersionedService, err := handler.NewOpenAPIService(spec) + if err != nil { + klog.Fatalf("Failed to create OpenAPIService: %v", err) + } + + err = openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux) + if err != nil { + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) + } + + return openAPIVersionedService, spec +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go b/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go new file mode 100644 index 000000000..b57d590f5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/profiling.go @@ -0,0 +1,43 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + "net/http/pprof" + + "k8s.io/apiserver/pkg/server/mux" +) + +// Profiling adds handlers for pprof under /debug/pprof. +type Profiling struct{} + +// Install adds the Profiling webservice to the given mux. +func (d Profiling) Install(c *mux.PathRecorderMux) { + c.UnlistedHandleFunc("/debug/pprof", redirectTo("/debug/pprof/")) + c.UnlistedHandlePrefix("/debug/pprof/", http.HandlerFunc(pprof.Index)) + c.UnlistedHandleFunc("/debug/pprof/profile", pprof.Profile) + c.UnlistedHandleFunc("/debug/pprof/symbol", pprof.Symbol) + c.UnlistedHandleFunc("/debug/pprof/trace", pprof.Trace) +} + +// redirectTo redirects request to a certain destination. +func redirectTo(to string) func(http.ResponseWriter, *http.Request) { + return func(rw http.ResponseWriter, req *http.Request) { + http.Redirect(rw, req, to, http.StatusFound) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/version.go b/vendor/k8s.io/apiserver/pkg/server/routes/version.go new file mode 100644 index 000000000..dd9ab0dcf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/routes/version.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "net/http" + + "github.com/emicklei/go-restful" + + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" +) + +// Version provides a webservice with version information. +type Version struct { + Version *version.Info +} + +// Install registers the APIServer's `/version` handler. +func (v Version) Install(c *restful.Container) { + if v.Version == nil { + return + } + + // Set up a service to return the git code version. + versionWS := new(restful.WebService) + versionWS.Path("/version") + versionWS.Doc("git code version from which this is built") + versionWS.Route( + versionWS.GET("/").To(v.handleVersion). + Doc("get the code version"). + Operation("getCodeVersion"). + Produces(restful.MIME_JSON). + Consumes(restful.MIME_JSON). + Writes(version.Info{})) + + c.Add(versionWS) +} + +// handleVersion writes the server's version information. +func (v Version) handleVersion(req *restful.Request, resp *restful.Response) { + responsewriters.WriteRawJSON(http.StatusOK, *v.Version, resp.ResponseWriter) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go new file mode 100644 index 000000000..38341eb03 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -0,0 +1,289 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/net/http2" + "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/server/dynamiccertificates" +) + +const ( + defaultKeepAlivePeriod = 3 * time.Minute +) + +// tlsConfig produces the tls.Config to serve with. 
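Before the TLS plumbing below, a small sketch of the plain-HTTP path through this file's helpers: a listener from options.CreateListener (added in serving.go earlier in this patch) handed to RunServer, which is defined further down. The address, shutdown timeout, and handler are illustrative assumptions; no TLS config is set, so this serves unencrypted HTTP:

    package main

    import (
        "net"
        "net/http"
        "time"

        "k8s.io/apiserver/pkg/server"
        "k8s.io/apiserver/pkg/server/options"
        "k8s.io/klog/v2"
    )

    func main() {
        ln, port, err := options.CreateListener("tcp", "127.0.0.1:0", net.ListenConfig{})
        if err != nil {
            klog.Fatal(err)
        }
        klog.Infof("listening on port %d", port)

        handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok\n"))
        })

        stopCh := make(chan struct{})
        // RunServer returns immediately; stoppedCh closes once in-flight requests have
        // drained after stopCh is closed, bounded by the shutdown timeout.
        stoppedCh, err := server.RunServer(&http.Server{Handler: handler}, ln, 30*time.Second, stopCh)
        if err != nil {
            klog.Fatal(err)
        }

        time.Sleep(time.Minute) // stand-in for real work
        close(stopCh)
        <-stoppedCh
    }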
+func (s *SecureServingInfo) tlsConfig(stopCh <-chan struct{}) (*tls.Config, error) { + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + // enable HTTP2 for go's 1.7 HTTP Server + NextProtos: []string{"h2", "http/1.1"}, + } + + // these are static aspects of the tls.Config + if s.DisableHTTP2 { + klog.Info("Forcing use of http/1.1 only") + tlsConfig.NextProtos = []string{"http/1.1"} + } + if s.MinTLSVersion > 0 { + tlsConfig.MinVersion = s.MinTLSVersion + } + if len(s.CipherSuites) > 0 { + tlsConfig.CipherSuites = s.CipherSuites + insecureCiphers := flag.InsecureTLSCiphers() + for i := 0; i < len(s.CipherSuites); i++ { + for cipherName, cipherID := range insecureCiphers { + if s.CipherSuites[i] == cipherID { + klog.Warningf("Use of insecure cipher '%s' detected.", cipherName) + } + } + } + } + + if s.ClientCA != nil { + // Populate PeerCertificates in requests, but don't reject connections without certificates + // This allows certificates to be validated by authenticators, while still allowing other auth types + tlsConfig.ClientAuth = tls.RequestClientCert + } + + if s.ClientCA != nil || s.Cert != nil || len(s.SNICerts) > 0 { + dynamicCertificateController := dynamiccertificates.NewDynamicServingCertificateController( + tlsConfig, + s.ClientCA, + s.Cert, + s.SNICerts, + nil, // TODO see how to plumb an event recorder down in here. For now this results in simply klog messages. + ) + // register if possible + if notifier, ok := s.ClientCA.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + if notifier, ok := s.Cert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + // start controllers if possible + if controller, ok := s.ClientCA.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of client CA failed: %v", err) + } + + go controller.Run(1, stopCh) + } + if controller, ok := s.Cert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of default serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + for _, sniCert := range s.SNICerts { + if notifier, ok := sniCert.(dynamiccertificates.Notifier); ok { + notifier.AddListener(dynamicCertificateController) + } + + if controller, ok := sniCert.(dynamiccertificates.ControllerRunner); ok { + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. + if err := controller.RunOnce(); err != nil { + klog.Warningf("Initial population of SNI serving certificate failed: %v", err) + } + + go controller.Run(1, stopCh) + } + } + + // runonce to try to prime data. If this fails, it's ok because we fail closed. + // Files are required to be populated already, so this is for convenience. 
+ if err := dynamicCertificateController.RunOnce(); err != nil { + klog.Warningf("Initial population of dynamic certificates failed: %v", err) + } + go dynamicCertificateController.Run(1, stopCh) + + tlsConfig.GetConfigForClient = dynamicCertificateController.GetConfigForClient + } + + return tlsConfig, nil +} + +// Serve runs the secure http server. It fails only if certificates cannot be loaded or the initial listen call fails. +// The actual server loop (stoppable by closing stopCh) runs in a go routine, i.e. Serve does not block. +// It returns a stoppedCh that is closed when all non-hijacked active requests have been processed. +func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) (<-chan struct{}, error) { + if s.Listener == nil { + return nil, fmt.Errorf("listener must not be nil") + } + + tlsConfig, err := s.tlsConfig(stopCh) + if err != nil { + return nil, err + } + + secureServer := &http.Server{ + Addr: s.Listener.Addr().String(), + Handler: handler, + MaxHeaderBytes: 1 << 20, + TLSConfig: tlsConfig, + } + + // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. + // This should be big enough to accommodate most API POST requests in a single frame, + // and small enough to allow a per connection buffer of this size multiplied by `MaxConcurrentStreams`. + const resourceBody99Percentile = 256 * 1024 + + http2Options := &http2.Server{} + + // shrink the per-stream buffer and max framesize from the 1MB default while still accommodating most API POST requests in a single frame + http2Options.MaxUploadBufferPerStream = resourceBody99Percentile + http2Options.MaxReadFrameSize = resourceBody99Percentile + + // use the overridden concurrent streams setting or make the default of 250 explicit so we can size MaxUploadBufferPerConnection appropriately + if s.HTTP2MaxStreamsPerConnection > 0 { + http2Options.MaxConcurrentStreams = uint32(s.HTTP2MaxStreamsPerConnection) + } else { + http2Options.MaxConcurrentStreams = 250 + } + + // increase the connection buffer size from the 1MB default to handle the specified number of concurrent streams + http2Options.MaxUploadBufferPerConnection = http2Options.MaxUploadBufferPerStream * int32(http2Options.MaxConcurrentStreams) + + if !s.DisableHTTP2 { + // apply settings to the server + if err := http2.ConfigureServer(secureServer, http2Options); err != nil { + return nil, fmt.Errorf("error configuring http2: %v", err) + } + } + + // use tlsHandshakeErrorWriter to handle messages of tls handshake error + tlsErrorWriter := &tlsHandshakeErrorWriter{os.Stderr} + tlsErrorLogger := log.New(tlsErrorWriter, "", 0) + secureServer.ErrorLog = tlsErrorLogger + + klog.Infof("Serving securely on %s", secureServer.Addr) + return RunServer(secureServer, s.Listener, shutdownTimeout, stopCh) +} + +// RunServer spawns a go-routine continuously serving until the stopCh is +// closed. +// It returns a stoppedCh that is closed when all non-hijacked active requests +// have been processed. +// This function does not block +// TODO: make private when insecure serving is gone from the kube-apiserver +func RunServer( + server *http.Server, + ln net.Listener, + shutDownTimeout time.Duration, + stopCh <-chan struct{}, +) (<-chan struct{}, error) { + if ln == nil { + return nil, fmt.Errorf("listener must not be nil") + } + + // Shutdown server gracefully. 
+ stoppedCh := make(chan struct{}) + go func() { + defer close(stoppedCh) + <-stopCh + ctx, cancel := context.WithTimeout(context.Background(), shutDownTimeout) + server.Shutdown(ctx) + cancel() + }() + + go func() { + defer utilruntime.HandleCrash() + + var listener net.Listener + listener = tcpKeepAliveListener{ln} + if server.TLSConfig != nil { + listener = tls.NewListener(listener, server.TLSConfig) + } + + err := server.Serve(listener) + + msg := fmt.Sprintf("Stopped listening on %s", ln.Addr().String()) + select { + case <-stopCh: + klog.Info(msg) + default: + panic(fmt.Sprintf("%s due to error: %v", msg, err)) + } + }() + + return stoppedCh, nil +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// +// Copied from Go 1.7.2 net/http/server.go +type tcpKeepAliveListener struct { + net.Listener +} + +func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { + c, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + if tc, ok := c.(*net.TCPConn); ok { + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(defaultKeepAlivePeriod) + } + return c, nil +} + +// tlsHandshakeErrorWriter writes TLS handshake errors to klog with +// trace level - V(5), to avoid flooding of tls handshake errors. +type tlsHandshakeErrorWriter struct { + out io.Writer +} + +const tlsHandshakeErrorPrefix = "http: TLS handshake error" + +func (w *tlsHandshakeErrorWriter) Write(p []byte) (int, error) { + if strings.Contains(string(p), tlsHandshakeErrorPrefix) { + klog.V(5).Info(string(p)) + metrics.TLSHandshakeErrors.Inc() + return len(p), nil + } + + // for non tls handshake error, log it as usual + return w.out.Write(p) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal.go b/vendor/k8s.io/apiserver/pkg/server/signal.go new file mode 100644 index 000000000..e5334ae4c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "os" + "os/signal" +) + +var onlyOneSignalHandler = make(chan struct{}) +var shutdownHandler chan os.Signal + +// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned +// which is closed on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +// Only one of SetupSignalContext and SetupSignalHandler should be called, and only can +// be called once. +func SetupSignalHandler() <-chan struct{} { + return SetupSignalContext().Done() +} + +// SetupSignalContext is same as SetupSignalHandler, but a context.Context is returned. +// Only one of SetupSignalContext and SetupSignalHandler should be called, and only can +// be called once. 
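Before SetupSignalContext below, a hypothetical standalone program showing how this signal plumbing is meant to be used; the HTTP server exists only to give the returned context something to cancel:

    package main

    import (
        "net/http"

        genericapiserver "k8s.io/apiserver/pkg/server"
        "k8s.io/klog/v2"
    )

    func main() {
        // May only be called once per process; a second SIGTERM/SIGINT exits immediately.
        ctx := genericapiserver.SetupSignalContext()

        srv := &http.Server{Addr: "127.0.0.1:8080", Handler: http.NotFoundHandler()}
        go func() {
            <-ctx.Done() // first SIGTERM or SIGINT
            _ = srv.Close()
        }()

        if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            klog.Fatal(err)
        }
    }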
+func SetupSignalContext() context.Context { + close(onlyOneSignalHandler) // panics when called twice + + shutdownHandler = make(chan os.Signal, 2) + + ctx, cancel := context.WithCancel(context.Background()) + signal.Notify(shutdownHandler, shutdownSignals...) + go func() { + <-shutdownHandler + cancel() + <-shutdownHandler + os.Exit(1) // second signal. Exit directly. + }() + + return ctx +} + +// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT) +// This returns whether a handler was notified +func RequestShutdown() bool { + if shutdownHandler != nil { + select { + case shutdownHandler <- shutdownSignals[0]: + return true + default: + } + } + + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal_posix.go b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go new file mode 100644 index 000000000..11b3bba65 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go @@ -0,0 +1,26 @@ +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/vendor/k8s.io/apiserver/pkg/server/signal_windows.go b/vendor/k8s.io/apiserver/pkg/server/signal_windows.go new file mode 100644 index 000000000..e7645a208 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/doc.go b/vendor/k8s.io/apiserver/pkg/server/storage/doc.go new file mode 100644 index 000000000..36b0d2252 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage contains the plumbing to setup the etcd storage of the apiserver. 
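+// It provides APIResourceConfigSource/ResourceConfig for tracking which
+// groups, versions, and resources are enabled, ResourceEncodingConfig for
+// choosing storage encodings, NewStorageCodec for building storage codecs,
+// and DefaultStorageFactory for assembling per-resource storagebackend.Config
+// values.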
+package storage // import "k8s.io/apiserver/pkg/server/storage" diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go new file mode 100644 index 000000000..bd85ac230 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_config.go @@ -0,0 +1,124 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APIResourceConfigSource is the interface to determine which groups and versions are enabled +type APIResourceConfigSource interface { + VersionEnabled(version schema.GroupVersion) bool + ResourceEnabled(resource schema.GroupVersionResource) bool + AnyVersionForGroupEnabled(group string) bool +} + +var _ APIResourceConfigSource = &ResourceConfig{} + +type ResourceConfig struct { + GroupVersionConfigs map[schema.GroupVersion]bool + ResourceConfigs map[schema.GroupVersionResource]bool +} + +func NewResourceConfig() *ResourceConfig { + return &ResourceConfig{GroupVersionConfigs: map[schema.GroupVersion]bool{}, ResourceConfigs: map[schema.GroupVersionResource]bool{}} +} + +// DisableAll disables all group/versions. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) DisableAll() { + for k := range o.GroupVersionConfigs { + o.GroupVersionConfigs[k] = false + } +} + +// EnableAll enables all group/versions. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) EnableAll() { + for k := range o.GroupVersionConfigs { + o.GroupVersionConfigs[k] = true + } +} + +// DisableMatchingVersions disables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) DisableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = false + } + } +} + +// EnableMatchingVersions enables all group/versions for which the matcher function returns true. It does not modify individual resource enablement/disablement. +func (o *ResourceConfig) EnableMatchingVersions(matcher func(gv schema.GroupVersion) bool) { + for k := range o.GroupVersionConfigs { + if matcher(k) { + o.GroupVersionConfigs[k] = true + } + } +} + +// DisableVersions disables the versions entirely. 
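+//
+// Illustrative example of how version-level and resource-level toggles
+// combine (the group/version values are hypothetical):
+//
+//    cfg := NewResourceConfig()
+//    appsV1 := schema.GroupVersion{Group: "apps", Version: "v1"}
+//    cfg.EnableVersions(appsV1)
+//    cfg.DisableResources(appsV1.WithResource("deployments"))
+//    cfg.VersionEnabled(appsV1)                               // true
+//    cfg.ResourceEnabled(appsV1.WithResource("deployments"))  // false: explicitly disabled
+//    cfg.ResourceEnabled(appsV1.WithResource("daemonsets"))   // true: version enabled, no override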
+func (o *ResourceConfig) DisableVersions(versions ...schema.GroupVersion) { + for _, version := range versions { + o.GroupVersionConfigs[version] = false + } +} + +func (o *ResourceConfig) EnableVersions(versions ...schema.GroupVersion) { + for _, version := range versions { + o.GroupVersionConfigs[version] = true + } +} + +func (o *ResourceConfig) VersionEnabled(version schema.GroupVersion) bool { + enabled, _ := o.GroupVersionConfigs[version] + return enabled +} + +func (o *ResourceConfig) DisableResources(resources ...schema.GroupVersionResource) { + for _, resource := range resources { + o.ResourceConfigs[resource] = false + } +} + +func (o *ResourceConfig) EnableResources(resources ...schema.GroupVersionResource) { + for _, resource := range resources { + o.ResourceConfigs[resource] = true + } +} + +func (o *ResourceConfig) ResourceEnabled(resource schema.GroupVersionResource) bool { + if !o.VersionEnabled(resource.GroupVersion()) { + return false + } + resourceEnabled, explicitlySet := o.ResourceConfigs[resource] + if !explicitlySet { + return true + } + return resourceEnabled +} + +func (o *ResourceConfig) AnyVersionForGroupEnabled(group string) bool { + for version := range o.GroupVersionConfigs { + if version.Group == group { + if o.VersionEnabled(version) { + return true + } + } + } + + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go new file mode 100644 index 000000000..efb22fbc8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ResourceEncodingConfig interface { + // StorageEncoding returns the serialization format for the resource. + // TODO this should actually return a GroupVersionKind since you can logically have multiple "matching" Kinds + // For now, it returns just the GroupVersion for consistency with old behavior + StorageEncodingFor(schema.GroupResource) (schema.GroupVersion, error) + + // InMemoryEncodingFor returns the groupVersion for the in memory representation the storage should convert to. + InMemoryEncodingFor(schema.GroupResource) (schema.GroupVersion, error) +} + +type DefaultResourceEncodingConfig struct { + // resources records the overriding encoding configs for individual resources. 
+ resources map[schema.GroupResource]*OverridingResourceEncoding + scheme *runtime.Scheme +} + +type OverridingResourceEncoding struct { + ExternalResourceEncoding schema.GroupVersion + InternalResourceEncoding schema.GroupVersion +} + +var _ ResourceEncodingConfig = &DefaultResourceEncodingConfig{} + +func NewDefaultResourceEncodingConfig(scheme *runtime.Scheme) *DefaultResourceEncodingConfig { + return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme} +} + +func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored schema.GroupResource, externalEncodingVersion, internalVersion schema.GroupVersion) { + o.resources[resourceBeingStored] = &OverridingResourceEncoding{ + ExternalResourceEncoding: externalEncodingVersion, + InternalResourceEncoding: internalVersion, + } +} + +func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { + if !o.scheme.IsGroupRegistered(resource.Group) { + return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) + } + + resourceOverride, resourceExists := o.resources[resource] + if resourceExists { + return resourceOverride.ExternalResourceEncoding, nil + } + + // return the most preferred external version for the group + return o.scheme.PrioritizedVersionsForGroup(resource.Group)[0], nil +} + +func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { + if !o.scheme.IsGroupRegistered(resource.Group) { + return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) + } + + resourceOverride, resourceExists := o.resources[resource] + if resourceExists { + return resourceOverride.InternalResourceEncoding, nil + } + return schema.GroupVersion{Group: resource.Group, Version: runtime.APIVersionInternal}, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go new file mode 100644 index 000000000..96faa1712 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + "mime" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apiserver/pkg/storage/storagebackend" +) + +// StorageCodecConfig are the arguments passed to newStorageCodecFn +type StorageCodecConfig struct { + StorageMediaType string + StorageSerializer runtime.StorageSerializer + StorageVersion schema.GroupVersion + MemoryVersion schema.GroupVersion + Config storagebackend.Config + + EncoderDecoratorFn func(runtime.Encoder) runtime.Encoder + DecoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder +} + +// NewStorageCodec assembles a storage codec for the provided storage media type, the provided serializer, and the requested +// storage and memory versions. +func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, runtime.GroupVersioner, error) { + mediaType, _, err := mime.ParseMediaType(opts.StorageMediaType) + if err != nil { + return nil, nil, fmt.Errorf("%q is not a valid mime-type", opts.StorageMediaType) + } + + serializer, ok := runtime.SerializerInfoForMediaType(opts.StorageSerializer.SupportedMediaTypes(), mediaType) + if !ok { + return nil, nil, fmt.Errorf("unable to find serializer for %q", mediaType) + } + + s := serializer.Serializer + + // Give callers the opportunity to wrap encoders and decoders. For decoders, each returned decoder will + // be passed to the recognizer so that multiple decoders are available. + var encoder runtime.Encoder = s + if opts.EncoderDecoratorFn != nil { + encoder = opts.EncoderDecoratorFn(encoder) + } + decoders := []runtime.Decoder{ + // selected decoder as the primary + s, + // universal deserializer as a fallback + opts.StorageSerializer.UniversalDeserializer(), + // base64-wrapped universal deserializer as a last resort. + // this allows reading base64-encoded protobuf, which should only exist if etcd2+protobuf was used at some point. + // data written that way could exist in etcd2, or could have been migrated to etcd3. + // TODO: flag this type of data if we encounter it, require migration (read to decode, write to persist using a supported encoder), and remove in 1.8 + runtime.NewBase64Serializer(nil, opts.StorageSerializer.UniversalDeserializer()), + } + if opts.DecoderDecoratorFn != nil { + decoders = opts.DecoderDecoratorFn(decoders) + } + + encodeVersioner := runtime.NewMultiGroupVersioner( + opts.StorageVersion, + schema.GroupKind{Group: opts.StorageVersion.Group}, + schema.GroupKind{Group: opts.MemoryVersion.Group}, + ) + + // Ensure the storage receives the correct version. + encoder = opts.StorageSerializer.EncoderForVersion( + encoder, + encodeVersioner, + ) + decoder := opts.StorageSerializer.DecoderToVersion( + recognizer.NewDecoder(decoders...), + runtime.NewCoercingMultiGroupVersioner( + opts.MemoryVersion, + schema.GroupKind{Group: opts.MemoryVersion.Group}, + schema.GroupKind{Group: opts.StorageVersion.Group}, + ), + ) + + return runtime.NewCodec(encoder, decoder), encodeVersioner, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go new file mode 100644 index 000000000..689b51322 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -0,0 +1,350 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +// Backend describes the storage servers, the information here should be enough +// for health validations. +type Backend struct { + // the url of storage backend like: https://etcd.domain:2379 + Server string + // the required tls config + TLSConfig *tls.Config +} + +// StorageFactory is the interface to locate the storage for a given GroupResource +type StorageFactory interface { + // New finds the storage destination for the given group and resource. It will + // return an error if the group has no storage destination configured. + NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) + + // ResourcePrefix returns the overridden resource prefix for the GroupResource + // This allows for cohabitation of resources with different native types and provides + // centralized control over the shape of etcd directories + ResourcePrefix(groupResource schema.GroupResource) string + + // Backends gets all backends for all registered storage destinations. + // Used for getting all instances for health validations. + Backends() []Backend +} + +// DefaultStorageFactory takes a GroupResource and returns back its storage interface. This result includes: +// 1. Merged etcd config, including: auth, server locations, prefixes +// 2. Resource encodings for storage: group,version,kind to store as +// 3. Cohabitating default: some resources like hpa are exposed through multiple APIs. They must agree on 1 and 2 +type DefaultStorageFactory struct { + // StorageConfig describes how to create a storage backend in general. + // Its authentication information will be used for every storage.Interface returned. + StorageConfig storagebackend.Config + + Overrides map[schema.GroupResource]groupResourceOverrides + + DefaultResourcePrefixes map[schema.GroupResource]string + + // DefaultMediaType is the media type used to store resources. If it is not set, "application/json" is used. + DefaultMediaType string + + // DefaultSerializer is used to create encoders and decoders for the storage.Interface. + DefaultSerializer runtime.StorageSerializer + + // ResourceEncodingConfig describes how to encode a particular GroupVersionResource + ResourceEncodingConfig ResourceEncodingConfig + + // APIResourceConfigSource indicates whether the *storage* is enabled, NOT the API + // This is discrete from resource enablement because those are separate concerns. How this source is configured + // is left to the caller. + APIResourceConfigSource APIResourceConfigSource + + // newStorageCodecFn exists to be overwritten for unit testing. 
+ newStorageCodecFn func(opts StorageCodecConfig) (codec runtime.Codec, encodeVersioner runtime.GroupVersioner, err error) +} + +type groupResourceOverrides struct { + // etcdLocation contains the list of "special" locations that are used for particular GroupResources + // These are merged on top of the StorageConfig when requesting the storage.Interface for a given GroupResource + etcdLocation []string + // etcdPrefix is the base location for a GroupResource. + etcdPrefix string + // etcdResourcePrefix is the location to use to store a particular type under the `etcdPrefix` location + // If empty, the default mapping is used. If the default mapping doesn't contain an entry, it will use + // the ToLowered name of the resource, not including the group. + etcdResourcePrefix string + // mediaType is the desired serializer to choose. If empty, the default is chosen. + mediaType string + // serializer contains the list of "special" serializers for a GroupResource. Resource=* means for the entire group + serializer runtime.StorageSerializer + // cohabitatingResources keeps track of which resources must be stored together. This happens when we have multiple ways + // of exposing one set of concepts. autoscaling.HPA and extensions.HPA as a for instance + // The order of the slice matters! It is the priority order of lookup for finding a storage location + cohabitatingResources []schema.GroupResource + // encoderDecoratorFn is optional and may wrap the provided encoder prior to being serialized. + encoderDecoratorFn func(runtime.Encoder) runtime.Encoder + // decoderDecoratorFn is optional and may wrap the provided decoders (can add new decoders). The order of + // returned decoders will be priority for attempt to decode. + decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder + // transformer is optional and shall encrypt that resource at rest. + transformer value.Transformer + // disablePaging will prevent paging on the provided resource. 
+ disablePaging bool +} + +// Apply overrides the provided config and options if the override has a value in that position +func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *StorageCodecConfig) { + if len(o.etcdLocation) > 0 { + config.Transport.ServerList = o.etcdLocation + } + if len(o.etcdPrefix) > 0 { + config.Prefix = o.etcdPrefix + } + + if len(o.mediaType) > 0 { + options.StorageMediaType = o.mediaType + } + if o.serializer != nil { + options.StorageSerializer = o.serializer + } + if o.encoderDecoratorFn != nil { + options.EncoderDecoratorFn = o.encoderDecoratorFn + } + if o.decoderDecoratorFn != nil { + options.DecoderDecoratorFn = o.decoderDecoratorFn + } + if o.transformer != nil { + config.Transformer = o.transformer + } + if o.disablePaging { + config.Paging = false + } +} + +var _ StorageFactory = &DefaultStorageFactory{} + +const AllResources = "*" + +func NewDefaultStorageFactory( + config storagebackend.Config, + defaultMediaType string, + defaultSerializer runtime.StorageSerializer, + resourceEncodingConfig ResourceEncodingConfig, + resourceConfig APIResourceConfigSource, + specialDefaultResourcePrefixes map[schema.GroupResource]string, +) *DefaultStorageFactory { + config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + if len(defaultMediaType) == 0 { + defaultMediaType = runtime.ContentTypeJSON + } + return &DefaultStorageFactory{ + StorageConfig: config, + Overrides: map[schema.GroupResource]groupResourceOverrides{}, + DefaultMediaType: defaultMediaType, + DefaultSerializer: defaultSerializer, + ResourceEncodingConfig: resourceEncodingConfig, + APIResourceConfigSource: resourceConfig, + DefaultResourcePrefixes: specialDefaultResourcePrefixes, + + newStorageCodecFn: NewStorageCodec, + } +} + +func (s *DefaultStorageFactory) SetEtcdLocation(groupResource schema.GroupResource, location []string) { + overrides := s.Overrides[groupResource] + overrides.etcdLocation = location + s.Overrides[groupResource] = overrides +} + +func (s *DefaultStorageFactory) SetEtcdPrefix(groupResource schema.GroupResource, prefix string) { + overrides := s.Overrides[groupResource] + overrides.etcdPrefix = prefix + s.Overrides[groupResource] = overrides +} + +// SetDisableAPIListChunking allows a specific resource to disable paging at the storage layer, to prevent +// exposure of key names in continuations. This may be overridden by feature gates. +func (s *DefaultStorageFactory) SetDisableAPIListChunking(groupResource schema.GroupResource) { + overrides := s.Overrides[groupResource] + overrides.disablePaging = true + s.Overrides[groupResource] = overrides +} + +// SetResourceEtcdPrefix sets the prefix for a resource, but not the base-dir. You'll end up in `etcdPrefix/resourceEtcdPrefix`. 
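+//
+// For example (hypothetical values):
+//
+//    f.SetResourceEtcdPrefix(schema.GroupResource{Group: "apps", Resource: "deployments"}, "deployments")
+//
+// together with a backend prefix of "/registry" stores deployments under
+// "/registry/deployments/...".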
+func (s *DefaultStorageFactory) SetResourceEtcdPrefix(groupResource schema.GroupResource, prefix string) { + overrides := s.Overrides[groupResource] + overrides.etcdResourcePrefix = prefix + s.Overrides[groupResource] = overrides +} + +func (s *DefaultStorageFactory) SetSerializer(groupResource schema.GroupResource, mediaType string, serializer runtime.StorageSerializer) { + overrides := s.Overrides[groupResource] + overrides.mediaType = mediaType + overrides.serializer = serializer + s.Overrides[groupResource] = overrides +} + +func (s *DefaultStorageFactory) SetTransformer(groupResource schema.GroupResource, transformer value.Transformer) { + overrides := s.Overrides[groupResource] + overrides.transformer = transformer + s.Overrides[groupResource] = overrides +} + +// AddCohabitatingResources links resources together the order of the slice matters! its the priority order of lookup for finding a storage location +func (s *DefaultStorageFactory) AddCohabitatingResources(groupResources ...schema.GroupResource) { + for _, groupResource := range groupResources { + overrides := s.Overrides[groupResource] + overrides.cohabitatingResources = groupResources + s.Overrides[groupResource] = overrides + } +} + +func (s *DefaultStorageFactory) AddSerializationChains(encoderDecoratorFn func(runtime.Encoder) runtime.Encoder, decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder, groupResources ...schema.GroupResource) { + for _, groupResource := range groupResources { + overrides := s.Overrides[groupResource] + overrides.encoderDecoratorFn = encoderDecoratorFn + overrides.decoderDecoratorFn = decoderDecoratorFn + s.Overrides[groupResource] = overrides + } +} + +func getAllResourcesAlias(resource schema.GroupResource) schema.GroupResource { + return schema.GroupResource{Group: resource.Group, Resource: AllResources} +} + +func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.GroupResource) schema.GroupResource { + for _, potentialStorageResource := range s.Overrides[groupResource].cohabitatingResources { + if s.APIResourceConfigSource.AnyVersionForGroupEnabled(potentialStorageResource.Group) { + return potentialStorageResource + } + } + + return groupResource +} + +// New finds the storage destination for the given group and resource. It will +// return an error if the group has no storage destination configured. 
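+//
+// Abbreviated, hypothetical call sequence (the backend config, serializer,
+// encoding config, and resource config come from the caller and are not shown):
+//
+//    f := NewDefaultStorageFactory(backendConfig, runtime.ContentTypeJSON,
+//        serializer, encodingConfig, resourceConfig, nil)
+//    cfg, err := f.NewConfig(schema.GroupResource{Group: "apps", Resource: "deployments"})
+//    // On success, cfg.Codec and cfg.EncodeVersioner are populated for that resource.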
+func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) { + chosenStorageResource := s.getStorageGroupResource(groupResource) + + // operate on copy + storageConfig := s.StorageConfig + codecConfig := StorageCodecConfig{ + StorageMediaType: s.DefaultMediaType, + StorageSerializer: s.DefaultSerializer, + } + + if override, ok := s.Overrides[getAllResourcesAlias(chosenStorageResource)]; ok { + override.Apply(&storageConfig, &codecConfig) + } + if override, ok := s.Overrides[chosenStorageResource]; ok { + override.Apply(&storageConfig, &codecConfig) + } + + var err error + codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource) + if err != nil { + return nil, err + } + codecConfig.MemoryVersion, err = s.ResourceEncodingConfig.InMemoryEncodingFor(groupResource) + if err != nil { + return nil, err + } + codecConfig.Config = storageConfig + + storageConfig.Codec, storageConfig.EncodeVersioner, err = s.newStorageCodecFn(codecConfig) + if err != nil { + return nil, err + } + klog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) + + return &storageConfig, nil +} + +// Backends returns all backends for all registered storage destinations. +// Used for getting all instances for health validations. +func (s *DefaultStorageFactory) Backends() []Backend { + servers := sets.NewString(s.StorageConfig.Transport.ServerList...) + + for _, overrides := range s.Overrides { + servers.Insert(overrides.etcdLocation...) + } + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + } + if len(s.StorageConfig.Transport.CertFile) > 0 && len(s.StorageConfig.Transport.KeyFile) > 0 { + cert, err := tls.LoadX509KeyPair(s.StorageConfig.Transport.CertFile, s.StorageConfig.Transport.KeyFile) + if err != nil { + klog.Errorf("failed to load key pair while getting backends: %s", err) + } else { + tlsConfig.Certificates = []tls.Certificate{cert} + } + } + if len(s.StorageConfig.Transport.TrustedCAFile) > 0 { + if caCert, err := ioutil.ReadFile(s.StorageConfig.Transport.TrustedCAFile); err != nil { + klog.Errorf("failed to read ca file while getting backends: %s", err) + } else { + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + tlsConfig.InsecureSkipVerify = false + } + } + + backends := []Backend{} + for server := range servers { + backends = append(backends, Backend{ + Server: server, + // We can't share TLSConfig across different backends to avoid races. 
+ // For more details see: http://pr.k8s.io/59338 + TLSConfig: tlsConfig.Clone(), + }) + } + return backends +} + +func (s *DefaultStorageFactory) ResourcePrefix(groupResource schema.GroupResource) string { + chosenStorageResource := s.getStorageGroupResource(groupResource) + groupOverride := s.Overrides[getAllResourcesAlias(chosenStorageResource)] + exactResourceOverride := s.Overrides[chosenStorageResource] + + etcdResourcePrefix := s.DefaultResourcePrefixes[chosenStorageResource] + if len(groupOverride.etcdResourcePrefix) > 0 { + etcdResourcePrefix = groupOverride.etcdResourcePrefix + } + if len(exactResourceOverride.etcdResourcePrefix) > 0 { + etcdResourcePrefix = exactResourceOverride.etcdResourcePrefix + } + if len(etcdResourcePrefix) == 0 { + etcdResourcePrefix = strings.ToLower(chosenStorageResource.Resource) + } + + return etcdResourcePrefix +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/OWNERS new file mode 100644 index 000000000..167792c32 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/OWNERS @@ -0,0 +1,26 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- liggitt +- timothysc +- wojtek-t +- xiang90 +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- mikedanese +- liggitt +- ncdc +- timothysc +- hongchaodeng +- krousey +- xiang90 +- mml +- ingvagabund +- resouer +- mbohlool +- enj diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go new file mode 100644 index 000000000..459517767 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -0,0 +1,1441 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "context" + "fmt" + "net/http" + "reflect" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +var ( + emptyFunc = func() {} +) + +const ( + // storageWatchListPageSize is the cacher's request chunk size of + // initial and resync watch lists to storage. + storageWatchListPageSize = int64(10000) + // defaultBookmarkFrequency defines how frequently watch bookmarks should be send + // in addition to sending a bookmark right before watch deadline. + // + // NOTE: Update `eventFreshDuration` when changing this value. + defaultBookmarkFrequency = time.Minute +) + +// Config contains the configuration for a given Cache. 
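+//
+// A minimal, hypothetical construction (the placeholders stand in for real
+// storage, key, attribute, and codec helpers; error handling elided):
+//
+//    c, err := NewCacherFromConfig(Config{
+//        Storage:        s,              // an underlying storage.Interface
+//        Versioner:      s.Versioner(),
+//        ResourcePrefix: "pods",
+//        KeyFunc:        keyFunc,
+//        GetAttrsFunc:   getAttrs,
+//        NewFunc:        func() runtime.Object { return &examplev1.Pod{} },
+//        NewListFunc:    func() runtime.Object { return &examplev1.PodList{} },
+//        Codec:          codec,
+//    })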
+type Config struct { + // An underlying storage.Interface. + Storage storage.Interface + + // An underlying storage.Versioner. + Versioner storage.Versioner + + // The Cache will be caching objects of a given Type and assumes that they + // are all stored under ResourcePrefix directory in the underlying database. + ResourcePrefix string + + // KeyFunc is used to get a key in the underlying storage for a given object. + KeyFunc func(runtime.Object) (string, error) + + // GetAttrsFunc is used to get object labels, fields + GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error) + + // IndexerFuncs is used for optimizing amount of watchers that + // needs to process an incoming event. + IndexerFuncs storage.IndexerFuncs + + // Indexers is used to accelerate the list operation, falls back to regular list + // operation if no indexer found. + Indexers *cache.Indexers + + // NewFunc is a function that creates new empty object storing a object of type Type. + NewFunc func() runtime.Object + + // NewList is a function that creates new empty object storing a list of + // objects of type Type. + NewListFunc func() runtime.Object + + Codec runtime.Codec + + Clock clock.Clock +} + +type watchersMap map[int]*cacheWatcher + +func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { + wm[number] = w +} + +func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) { + if watcher, ok := wm[number]; ok { + delete(wm, number) + done(watcher) + } +} + +func (wm watchersMap) terminateAll(done func(*cacheWatcher)) { + for key, watcher := range wm { + delete(wm, key) + done(watcher) + } +} + +type indexedWatchers struct { + allWatchers watchersMap + valueWatchers map[string]watchersMap +} + +func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) { + if supported { + if _, ok := i.valueWatchers[value]; !ok { + i.valueWatchers[value] = watchersMap{} + } + i.valueWatchers[value].addWatcher(w, number) + } else { + i.allWatchers.addWatcher(w, number) + } +} + +func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) { + if supported { + i.valueWatchers[value].deleteWatcher(number, done) + if len(i.valueWatchers[value]) == 0 { + delete(i.valueWatchers, value) + } + } else { + i.allWatchers.deleteWatcher(number, done) + } +} + +func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) { + if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { + klog.Warningf("Terminating all watchers from cacher %v", objectType) + } + i.allWatchers.terminateAll(done) + for _, watchers := range i.valueWatchers { + watchers.terminateAll(done) + } + i.valueWatchers = map[string]watchersMap{} +} + +// As we don't need a high precision here, we keep all watchers timeout within a +// second in a bucket, and pop up them once at the timeout. To be more specific, +// if you set fire time at X, you can get the bookmark within (X-1,X+1) period. 
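+// For example (hypothetical numbers): with createTime at T0 and a watcher
+// whose next bookmark time is T0+65.4s, the bucket ID is 65, so the watcher
+// is returned by popExpiredWatchers once roughly 65 seconds have elapsed
+// since T0.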
+type watcherBookmarkTimeBuckets struct { + lock sync.Mutex + // the key of watcherBuckets is the number of seconds since createTime + watchersBuckets map[int64][]*cacheWatcher + createTime time.Time + startBucketID int64 + clock clock.Clock + bookmarkFrequency time.Duration +} + +func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets { + return &watcherBookmarkTimeBuckets{ + watchersBuckets: make(map[int64][]*cacheWatcher), + createTime: clock.Now(), + startBucketID: 0, + clock: clock, + bookmarkFrequency: bookmarkFrequency, + } +} + +// adds a watcher to the bucket, if the deadline is before the start, it will be +// added to the first one. +func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { + nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency) + if !ok { + return false + } + bucketID := int64(nextTime.Sub(t.createTime) / time.Second) + t.lock.Lock() + defer t.lock.Unlock() + if bucketID < t.startBucketID { + bucketID = t.startBucketID + } + watchers, _ := t.watchersBuckets[bucketID] + t.watchersBuckets[bucketID] = append(watchers, w) + return true +} + +func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher { + currentBucketID := int64(t.clock.Since(t.createTime) / time.Second) + // There should be one or two elements in almost all cases + expiredWatchers := make([][]*cacheWatcher, 0, 2) + t.lock.Lock() + defer t.lock.Unlock() + for ; t.startBucketID <= currentBucketID; t.startBucketID++ { + if watchers, ok := t.watchersBuckets[t.startBucketID]; ok { + delete(t.watchersBuckets, t.startBucketID) + expiredWatchers = append(expiredWatchers, watchers) + } + } + return expiredWatchers +} + +type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool + +type indexedTriggerFunc struct { + indexName string + indexerFunc storage.IndexerFunc +} + +// Cacher is responsible for serving WATCH and LIST requests for a given +// resource from its internal cache and updating its cache in the background +// based on the underlying storage contents. +// Cacher implements storage.Interface (although most of the calls are just +// delegated to the underlying storage). +type Cacher struct { + // HighWaterMarks for performance debugging. + // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms + // See: https://golang.org/pkg/sync/atomic/ for more information + incomingHWM storage.HighWaterMark + // Incoming events that should be dispatched to watchers. + incoming chan watchCacheEvent + + sync.RWMutex + + // Before accessing the cacher's cache, wait for the ready to be ok. + // This is necessary to prevent users from accessing structures that are + // uninitialized or are being repopulated right now. + // ready needs to be set to false when the cacher is paused or stopped. + // ready needs to be set to true when the cacher is ready to use after + // initialization. + ready *ready + + // Underlying storage.Interface. + storage storage.Interface + + // Expected type of objects in the underlying cache. + objectType reflect.Type + + // "sliding window" of recent changes of objects and the current state. + watchCache *watchCache + reflector *cache.Reflector + + // Versioner is used to handle resource versions. + versioner storage.Versioner + + // newFunc is a function that creates new empty object storing a object of type Type. 
+ newFunc func() runtime.Object + + // indexedTrigger is used for optimizing amount of watchers that needs to process + // an incoming event. + indexedTrigger *indexedTriggerFunc + // watchers is mapping from the value of trigger function that a + // watcher is interested into the watchers + watcherIdx int + watchers indexedWatchers + + // Defines a time budget that can be spend on waiting for not-ready watchers + // while dispatching event before shutting them down. + dispatchTimeoutBudget timeBudget + + // Handling graceful termination. + stopLock sync.RWMutex + stopped bool + stopCh chan struct{} + stopWg sync.WaitGroup + + clock clock.Clock + // timer is used to avoid unnecessary allocations in underlying watchers. + timer *time.Timer + + // dispatching determines whether there is currently dispatching of + // any event in flight. + dispatching bool + // watchersBuffer is a list of watchers potentially interested in currently + // dispatched event. + watchersBuffer []*cacheWatcher + // blockedWatchers is a list of watchers whose buffer is currently full. + blockedWatchers []*cacheWatcher + // watchersToStop is a list of watchers that were supposed to be stopped + // during current dispatching, but stopping was deferred to the end of + // dispatching that event to avoid race with closing channels in watchers. + watchersToStop []*cacheWatcher + // Maintain a timeout queue to send the bookmark event before the watcher times out. + bookmarkWatchers *watcherBookmarkTimeBuckets +} + +// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from +// its internal cache and updating its cache in the background based on the +// given configuration. +func NewCacherFromConfig(config Config) (*Cacher, error) { + stopCh := make(chan struct{}) + obj := config.NewFunc() + // Give this error when it is constructed rather than when you get the + // first watch item, because it's much easier to track down that way. + if err := runtime.CheckCodec(config.Codec, obj); err != nil { + return nil, fmt.Errorf("storage codec doesn't seem to match given type: %v", err) + } + + var indexedTrigger *indexedTriggerFunc + if config.IndexerFuncs != nil { + // For now, we don't support multiple trigger functions defined + // for a given resource. + if len(config.IndexerFuncs) > 1 { + return nil, fmt.Errorf("cacher %s doesn't support more than one IndexerFunc: ", reflect.TypeOf(obj).String()) + } + for key, value := range config.IndexerFuncs { + if value != nil { + indexedTrigger = &indexedTriggerFunc{ + indexName: key, + indexerFunc: value, + } + } + } + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + objType := reflect.TypeOf(obj) + cacher := &Cacher{ + ready: newReady(), + storage: config.Storage, + objectType: objType, + versioner: config.Versioner, + newFunc: config.NewFunc, + indexedTrigger: indexedTrigger, + watcherIdx: 0, + watchers: indexedWatchers{ + allWatchers: make(map[int]*cacheWatcher), + valueWatchers: make(map[string]watchersMap), + }, + // TODO: Figure out the correct value for the buffer size. + incoming: make(chan watchCacheEvent, 100), + dispatchTimeoutBudget: newTimeBudget(stopCh), + // We need to (potentially) stop both: + // - wait.Until go-routine + // - reflector.ListAndWatch + // and there are no guarantees on the order that they will stop. + // So we will be simply closing the channel, and synchronizing on the WaitGroup. 
+ stopCh: stopCh, + clock: config.Clock, + timer: time.NewTimer(time.Duration(0)), + bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency), + } + + // Ensure that timer is stopped. + if !cacher.timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-cacher.timer.C + } + + watchCache := newWatchCache( + config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType) + listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + reflectorName := "storage/cacher.go:" + config.ResourcePrefix + + reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) + // Configure reflector's pager to for an appropriate pagination chunk size for fetching data from + // storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error. + reflector.WatchListPageSize = storageWatchListPageSize + + cacher.watchCache = watchCache + cacher.reflector = reflector + + go cacher.dispatchEvents() + + cacher.stopWg.Add(1) + go func() { + defer cacher.stopWg.Done() + defer cacher.terminateAllWatchers() + wait.Until( + func() { + if !cacher.isStopped() { + cacher.startCaching(stopCh) + } + }, time.Second, stopCh, + ) + }() + + return cacher, nil +} + +func (c *Cacher) startCaching(stopChannel <-chan struct{}) { + // The 'usable' lock is always 'RLock'able when it is safe to use the cache. + // It is safe to use the cache after a successful list until a disconnection. + // We start with usable (write) locked. The below OnReplace function will + // unlock it after a successful list. The below defer will then re-lock + // it when this function exits (always due to disconnection), only if + // we actually got a successful list. This cycle will repeat as needed. + successfulList := false + c.watchCache.SetOnReplace(func() { + successfulList = true + c.ready.set(true) + klog.V(1).Infof("cacher (%v): initialized", c.objectType.String()) + }) + defer func() { + if successfulList { + c.ready.set(false) + } + }() + + c.terminateAllWatchers() + // Note that since onReplace may be not called due to errors, we explicitly + // need to retry it on errors under lock. + // Also note that startCaching is called in a loop, so there's no need + // to have another loop here. + if err := c.reflector.ListAndWatch(stopChannel); err != nil { + klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err) + } +} + +// Versioner implements storage.Interface. +func (c *Cacher) Versioner() storage.Versioner { + return c.storage.Versioner() +} + +// Create implements storage.Interface. +func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return c.storage.Create(ctx, key, obj, out, ttl) +} + +// Delete implements storage.Interface. +func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion) +} + +// Watch implements storage.Interface. 
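+//
+// Hypothetical direct invocation (in practice the registry layer drives this):
+//
+//    w, err := c.Watch(ctx, "/pods/ns1", storage.ListOptions{
+//        ResourceVersion: "1234",
+//        Predicate:       storage.Everything,
+//    })
+//    if err == nil {
+//        for ev := range w.ResultChan() {
+//            // handle ev.Type / ev.Object
+//        }
+//    }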
+func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + pred := opts.Predicate + watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return nil, err + } + + c.ready.wait() + + triggerValue, triggerSupported := "", false + if c.indexedTrigger != nil { + for _, field := range pred.IndexFields { + if field == c.indexedTrigger.indexName { + if value, ok := pred.Field.RequiresExactMatch(field); ok { + triggerValue, triggerSupported = value, true + } + } + } + } + + // If there is indexedTrigger defined, but triggerSupported is false, + // we can't narrow the amount of events significantly at this point. + // + // That said, currently indexedTrigger is defined only for couple resources: + // Pods, Nodes, Secrets and ConfigMaps and there is only a constant + // number of watchers for which triggerSupported is false (excluding those + // issued explicitly by users). + // Thus, to reduce the risk of those watchers blocking all watchers of a + // given resource in the system, we increase the sizes of buffers for them. + chanSize := 10 + if c.indexedTrigger != nil && !triggerSupported { + // TODO: We should tune this value and ideally make it dependent on the + // number of objects of a given type and/or their churn. + chanSize = 1000 + } + + // Determine watch timeout('0' means deadline is not set, ignore checking) + deadline, _ := ctx.Deadline() + // Create a watcher here to reduce memory allocations under lock, + // given that memory allocation may trigger GC and block the thread. + // Also note that emptyFunc is a placeholder, until we will be able + // to compute watcher.forget function (which has to happen under lock). + watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType) + + // We explicitly use thread unsafe version and do locking ourself to ensure that + // no new events will be processed in the meantime. The watchCache will be unlocked + // on return from this function. + // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the + // underlying watchCache is calling processEvent under its lock. + c.watchCache.RLock() + defer c.watchCache.RUnlock() + initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. + return newErrWatcher(err), nil + } + + // With some events already sent, update resourceVersion so that + // events that were buffered and not yet processed won't be delivered + // to this watcher second time causing going back in time. + if len(initEvents) > 0 { + watchRV = initEvents[len(initEvents)-1].ResourceVersion + } + + func() { + c.Lock() + defer c.Unlock() + // Update watcher.forget function once we can compute it. + watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) + c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) + + // Add it to the queue only when the client support watch bookmarks. + if watcher.allowWatchBookmarks { + c.bookmarkWatchers.addWatcher(watcher) + } + c.watcherIdx++ + }() + + go watcher.process(ctx, initEvents, watchRV) + return watcher, nil +} + +// WatchList implements storage.Interface. 
+func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return c.Watch(ctx, key, opts) +} + +// Get implements storage.Interface. +func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + if opts.ResourceVersion == "" { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). + return c.storage.Get(ctx, key, opts, objPtr) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return err + } + + if getRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.Get(ctx, key, opts, objPtr) + } + + // Do not create a trace - it's not for free and there are tons + // of Get requests. We can add it if it will be really needed. + c.ready.wait() + + objVal, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil) + if err != nil { + return err + } + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + objVal.Set(reflect.ValueOf(elem.Object).Elem()) + } else { + objVal.Set(reflect.Zero(objVal.Type())) + if !opts.IgnoreNotFound { + return storage.NewKeyNotFoundError(key, int64(readResourceVersion)) + } + } + return nil +} + +// GetToList implements storage.Interface. +func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). If a continuation is + // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero + return c.storage.GetToList(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.GetToList(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. 
+ listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace) + if err != nil { + return err + } + trace.Step("Got from cache") + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// List implements storage.Interface. +func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). If a continuation is + // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero. + return c.storage.List(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.List(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace) + if err != nil { + return err + } + trace.Step("Listed items from cache", utiltrace.Field{"count", len(objs)}) + if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() { + // Resize the slice appropriately, since we already know that none + // of the elements will be filtered out. 
+ listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs))) + trace.Step("Resized result") + } + for _, obj := range objs { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + trace.Step("Filtered items", utiltrace.Field{"count", listVal.Len()}) + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// GuaranteedUpdate implements storage.Interface. +func (c *Cacher) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ runtime.Object) error { + // Ignore the suggestion and try to pass down the current version of the object + // read from cache. + if elem, exists, err := c.watchCache.GetByKey(key); err != nil { + klog.Errorf("GetByKey returned error: %v", err) + } else if exists { + currObj := elem.(*storeElement).Object.DeepCopyObject() + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) + } + // If we couldn't get the object, fallback to no-suggestion. + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil) +} + +// Count implements storage.Interface. +func (c *Cacher) Count(pathPrefix string) (int64, error) { + return c.storage.Count(pathPrefix) +} + +// baseObjectThreadUnsafe omits locking for cachingObject. +func baseObjectThreadUnsafe(object runtime.Object) runtime.Object { + if co, ok := object.(*cachingObject); ok { + return co.object + } + return object +} + +func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bool) { + if c.indexedTrigger == nil { + return nil, false + } + + result := make([]string, 0, 2) + result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object))) + if event.PrevObject == nil { + return result, true + } + prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject)) + if result[0] != prevTriggerValue { + result = append(result, prevTriggerValue) + } + return result, true +} + +func (c *Cacher) processEvent(event *watchCacheEvent) { + if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { + // Monitor if this gets backed up, and how much. + klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) + } + c.incoming <- *event +} + +func (c *Cacher) dispatchEvents() { + // Jitter to help level out any aggregate load. + bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25)) + defer bookmarkTimer.Stop() + + lastProcessedResourceVersion := uint64(0) + for { + select { + case event, ok := <-c.incoming: + if !ok { + return + } + // Don't dispatch bookmarks coming from the storage layer. + // They can be very frequent (even to the level of subseconds) + // to allow efficient watch resumption on kube-apiserver restarts, + // and propagating them down may overload the whole system. + // + // TODO: If at some point we decide the performance and scalability + // footprint is acceptable, this is the place to hook them in. 
+			// However, we then need to check if this was called as a result
+			// of a bookmark event or regular Add/Update/Delete operation by
+			// checking if resourceVersion here has changed.
+			if event.Type != watch.Bookmark {
+				c.dispatchEvent(&event)
+			}
+			lastProcessedResourceVersion = event.ResourceVersion
+		case <-bookmarkTimer.C():
+			bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
+			// Never send a bookmark event if we did not see an event here, this is fine
+			// because we don't provide any guarantees on sending bookmarks.
+			if lastProcessedResourceVersion == 0 {
+				// pop expired watchers in case there has been no update
+				c.bookmarkWatchers.popExpiredWatchers()
+				continue
+			}
+			bookmarkEvent := &watchCacheEvent{
+				Type:            watch.Bookmark,
+				Object:          c.newFunc(),
+				ResourceVersion: lastProcessedResourceVersion,
+			}
+			if err := c.versioner.UpdateObject(bookmarkEvent.Object, bookmarkEvent.ResourceVersion); err != nil {
+				klog.Errorf("failure to set resourceVersion to %d on bookmark event %+v", bookmarkEvent.ResourceVersion, bookmarkEvent.Object)
+				continue
+			}
+			c.dispatchEvent(bookmarkEvent)
+		case <-c.stopCh:
+			return
+		}
+	}
+}
+
+func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) {
+	switch event.Type {
+	case watch.Added, watch.Modified:
+		if object, err := newCachingObject(event.Object); err == nil {
+			event.Object = object
+		} else {
+			klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
+		}
+		// Don't wrap PrevObject for update event (for create events it is nil).
+		// We only encode those to deliver DELETE watch events, so if
+		// event.Object is not nil it can be used only for watchers for which
+		// selector was satisfied for its previous version and is no longer
+		// satisfied for the current version.
+		// This is rare enough that it doesn't justify making deep-copy of the
+		// object (done by newCachingObject) every time.
+	case watch.Deleted:
+		// Don't wrap Object for delete events - these are not used to deliver any
+		// events. Only wrap PrevObject.
+		if object, err := newCachingObject(event.PrevObject); err == nil {
+			// Update resource version of the underlying object.
+			// event.PrevObject is used to deliver DELETE watch events and
+			// for them, we set resourceVersion to the current resource version instead of
+			// the resourceVersion of the last modification of the object.
+			updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion)
+			event.PrevObject = object
+		} else {
+			klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
+		}
+	}
+}
+
+func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
+	c.startDispatching(event)
+	defer c.finishDispatching()
+	// Watchers stopped after startDispatching will be delayed to finishDispatching.
+
+	// Since add() can block, we explicitly add when cacher is unlocked.
+	// Dispatching the event in a nonblocking way first makes faster watchers
+	// not be blocked by slower ones.
+	if event.Type == watch.Bookmark {
+		for _, watcher := range c.watchersBuffer {
+			watcher.nonblockingAdd(event)
+		}
+	} else {
+		// Set up caching of object serializations only for dispatching this event.
+		//
+		// Storing serializations in memory would result in increased memory usage,
+		// but it would help for caching encodings for watches started from old
+		// versions. However, we still don't have convincing data that the gain
+		// from it justifies increased memory usage, so for now we drop the cached
+		// serializations after dispatching this event.
+ // + // Given the deep-copies that are done to create cachingObjects, + // we try to cache serializations only if there are at least 3 watchers. + if len(c.watchersBuffer) >= 3 { + // Make a shallow copy to allow overwriting Object and PrevObject. + wcEvent := *event + setCachingObjects(&wcEvent, c.versioner) + event = &wcEvent + } + + c.blockedWatchers = c.blockedWatchers[:0] + for _, watcher := range c.watchersBuffer { + if !watcher.nonblockingAdd(event) { + c.blockedWatchers = append(c.blockedWatchers, watcher) + } + } + + if len(c.blockedWatchers) > 0 { + // dispatchEvent is called very often, so arrange + // to reuse timers instead of constantly allocating. + startTime := time.Now() + timeout := c.dispatchTimeoutBudget.takeAvailable() + c.timer.Reset(timeout) + + // Make sure every watcher will try to send event without blocking first, + // even if the timer has already expired. + timer := c.timer + for _, watcher := range c.blockedWatchers { + if !watcher.add(event, timer) { + // fired, clean the timer by set it to nil. + timer = nil + } + } + + // Stop the timer if it is not fired + if timer != nil && !timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-timer.C + } + + c.dispatchTimeoutBudget.returnUnused(timeout - time.Since(startTime)) + } + } +} + +func (c *Cacher) startDispatchingBookmarkEvents() { + // Pop already expired watchers. However, explicitly ignore stopped ones, + // as we don't delete watcher from bookmarkWatchers when it is stopped. + for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() { + for _, watcher := range watchers { + // c.Lock() is held here. + // watcher.stopThreadUnsafe() is protected by c.Lock() + if watcher.stopped { + continue + } + c.watchersBuffer = append(c.watchersBuffer, watcher) + // Requeue the watcher for the next bookmark if needed. + c.bookmarkWatchers.addWatcher(watcher) + } + } +} + +// startDispatching chooses watchers potentially interested in a given event +// a marks dispatching as true. +func (c *Cacher) startDispatching(event *watchCacheEvent) { + // It is safe to call triggerValuesThreadUnsafe here, because at this + // point only this thread can access this event (we create a separate + // watchCacheEvent for every dispatch). + triggerValues, supported := c.triggerValuesThreadUnsafe(event) + + c.Lock() + defer c.Unlock() + + c.dispatching = true + // We are reusing the slice to avoid memory reallocations in every + // dispatchEvent() call. That may prevent Go GC from freeing items + // from previous phases that are sitting behind the current length + // of the slice, but there is only a limited number of those and the + // gain from avoiding memory allocations is much bigger. + c.watchersBuffer = c.watchersBuffer[:0] + + if event.Type == watch.Bookmark { + c.startDispatchingBookmarkEvents() + // return here to reduce following code indentation and diff + return + } + + // Iterate over "allWatchers" no matter what the trigger function is. + for _, watcher := range c.watchers.allWatchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + if supported { + // Iterate over watchers interested in the given values of the trigger. + for _, triggerValue := range triggerValues { + for _, watcher := range c.watchers.valueWatchers[triggerValue] { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } else { + // supported equal to false generally means that trigger function + // is not defined (or not aware of any indexes). 
In this case, + // watchers filters should generally also don't generate any + // trigger values, but can cause problems in case of some + // misconfiguration. Thus we paranoidly leave this branch. + + // Iterate over watchers interested in exact values for all values. + for _, watchers := range c.watchers.valueWatchers { + for _, watcher := range watchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } +} + +// finishDispatching stops all the watchers that were supposed to be +// stopped in the meantime, but it was deferred to avoid closing input +// channels of watchers, as add() may still have writing to it. +// It also marks dispatching as false. +func (c *Cacher) finishDispatching() { + c.Lock() + defer c.Unlock() + c.dispatching = false + for _, watcher := range c.watchersToStop { + watcher.stopThreadUnsafe() + } + c.watchersToStop = c.watchersToStop[:0] +} + +func (c *Cacher) terminateAllWatchers() { + c.Lock() + defer c.Unlock() + c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe) +} + +func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) { + if c.dispatching { + c.watchersToStop = append(c.watchersToStop, watcher) + } else { + watcher.stopThreadUnsafe() + } +} + +func (c *Cacher) isStopped() bool { + c.stopLock.RLock() + defer c.stopLock.RUnlock() + return c.stopped +} + +// Stop implements the graceful termination. +func (c *Cacher) Stop() { + c.stopLock.Lock() + if c.stopped { + // avoid stopping twice (note: cachers are shared with subresources) + c.stopLock.Unlock() + return + } + c.stopped = true + c.stopLock.Unlock() + close(c.stopCh) + c.stopWg.Wait() +} + +func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() { + return func() { + c.Lock() + defer c.Unlock() + + // It's possible that the watcher is already not in the structure (e.g. in case of + // simultaneous Stop() and terminateAllWatchers(), but it is safe to call stopThreadUnsafe() + // on a watcher multiple times. + c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe) + } +} + +func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc { + filterFunc := func(objKey string, label labels.Set, field fields.Set) bool { + if !hasPathPrefix(objKey, key) { + return false + } + return p.MatchesObjectAttributes(label, field) + } + return filterFunc +} + +// LastSyncResourceVersion returns resource version to which the underlying cache is synced. +func (c *Cacher) LastSyncResourceVersion() (uint64, error) { + c.ready.wait() + + resourceVersion := c.reflector.LastSyncResourceVersion() + return c.versioner.ParseResourceVersion(resourceVersion) +} + +// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. +type cacherListerWatcher struct { + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object +} + +// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher. +func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { + return &cacherListerWatcher{ + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + } +} + +// Implements cache.ListerWatcher interface. 
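+// List lists objects from the underlying storage; the cacher's reflector uses it
+// to (re)populate the watch cache.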
+func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { + list := lw.newListFunc() + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: options.Limit, + Continue: options.Continue, + } + + if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil { + return nil, err + } + return list, nil +} + +// Implements cache.ListerWatcher interface. +func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { + opts := storage.ListOptions{ + ResourceVersion: options.ResourceVersion, + Predicate: storage.Everything, + } + if utilfeature.DefaultFeatureGate.Enabled(features.EfficientWatchResumption) { + opts.ProgressNotify = true + } + return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, opts) +} + +// errWatcher implements watch.Interface to return a single error +type errWatcher struct { + result chan watch.Event +} + +func newErrWatcher(err error) *errWatcher { + // Create an error event + errEvent := watch.Event{Type: watch.Error} + switch err := err.(type) { + case runtime.Object: + errEvent.Object = err + case *errors.StatusError: + errEvent.Object = &err.ErrStatus + default: + errEvent.Object = &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Reason: metav1.StatusReasonInternalError, + Code: http.StatusInternalServerError, + } + } + + // Create a watcher with room for a single event, populate it, and close the channel + watcher := &errWatcher{result: make(chan watch.Event, 1)} + watcher.result <- errEvent + close(watcher.result) + + return watcher +} + +// Implements watch.Interface. +func (c *errWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. +func (c *errWatcher) Stop() { + // no-op +} + +// cacheWatcher implements watch.Interface +// this is not thread-safe +type cacheWatcher struct { + input chan *watchCacheEvent + result chan watch.Event + done chan struct{} + filter filterWithAttrsFunc + stopped bool + forget func() + versioner storage.Versioner + // The watcher will be closed by server after the deadline, + // save it here to send bookmark events before that. + deadline time.Time + allowWatchBookmarks bool + // Object type of the cache watcher interests + objectType reflect.Type +} + +func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type) *cacheWatcher { + return &cacheWatcher{ + input: make(chan *watchCacheEvent, chanSize), + result: make(chan watch.Event, chanSize), + done: make(chan struct{}), + filter: filter, + stopped: false, + forget: forget, + versioner: versioner, + deadline: deadline, + allowWatchBookmarks: allowWatchBookmarks, + objectType: objectType, + } +} + +// Implements watch.Interface. +func (c *cacheWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. 
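+// Stop unregisters the watcher from the cacher via the forget callback; the actual
+// channel close happens under the cacher's lock in stopThreadUnsafe.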
+func (c *cacheWatcher) Stop() {
+	c.forget()
+}
+
+// we rely on the fact that stopThreadUnsafe is actually protected by Cacher.Lock()
+func (c *cacheWatcher) stopThreadUnsafe() {
+	if !c.stopped {
+		c.stopped = true
+		close(c.done)
+		close(c.input)
+	}
+}
+
+func (c *cacheWatcher) nonblockingAdd(event *watchCacheEvent) bool {
+	select {
+	case c.input <- event:
+		return true
+	default:
+		return false
+	}
+}
+
+// A nil timer means that add will not block (if it can't send the event immediately, it will break the watcher)
+func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
+	// Try to send the event immediately, without blocking.
+	if c.nonblockingAdd(event) {
+		return true
+	}
+
+	closeFunc := func() {
+		// This means that we couldn't send event to that watcher.
+		// Since we don't want to block on it infinitely,
+		// we simply terminate it.
+		klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String())
+		c.forget()
+	}
+
+	if timer == nil {
+		closeFunc()
+		return false
+	}
+
+	// OK, block sending, but only until timer fires.
+	select {
+	case c.input <- event:
+		return true
+	case <-timer.C:
+		closeFunc()
+		return false
+	}
+}
+
+func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) {
+	// We try to send bookmarks:
+	// (a) roughly every minute
+	// (b) right before the watcher timeout - for now we simply set it 2s before
+	//     the deadline
+	// The former gives us periodicity if the watch breaks due to unexpected
+	// conditions, the latter ensures that on timeout the watcher is as close to
+	// now as possible - this covers 99% of cases.
+	heartbeatTime := now.Add(bookmarkFrequency)
+	if c.deadline.IsZero() {
+		// Timeout is set by our client libraries (e.g. reflector) as well as defaulted by
+		// apiserver if properly configured. So this shouldn't happen in practice.
+		return heartbeatTime, true
+	}
+	if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) {
+		heartbeatTime = pretimeoutTime
+	}
+
+	if heartbeatTime.Before(now) {
+		return time.Time{}, false
+	}
+	return heartbeatTime, true
+}
+
+func getEventObject(object runtime.Object) runtime.Object {
+	if _, ok := object.(runtime.CacheableObject); ok {
+		// It is safe to return without deep-copy, because the underlying
+		// object was already deep-copied during construction.
+		return object
+	}
+	return object.DeepCopyObject()
+}
+
+func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
+	if _, ok := object.(*cachingObject); ok {
+		// We assume that for cachingObject resourceVersion was already propagated before.
+		return
+	}
+	if err := versioner.UpdateObject(object, resourceVersion); err != nil {
+		utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err))
+	}
+}
+
+func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event {
+	if event.Type == watch.Bookmark {
+		return &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()}
+	}
+
+	curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields)
+	oldObjPasses := false
+	if event.PrevObject != nil {
+		oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields)
+	}
+	if !curObjPasses && !oldObjPasses {
+		// Watcher is not interested in that object.
+		return nil
+	}
+
+	switch {
+	case curObjPasses && !oldObjPasses:
+		return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)}
+	case curObjPasses && oldObjPasses:
+		return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)}
+	case !curObjPasses && oldObjPasses:
+		// return a delete event with the previous object content, but with the event's resource version
+		oldObj := getEventObject(event.PrevObject)
+		updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion)
+		return &watch.Event{Type: watch.Deleted, Object: oldObj}
+	}
+
+	return nil
+}
+
+// NOTE: sendWatchCacheEvent is assumed to not modify the event !!!
+func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) {
+	watchEvent := c.convertToWatchEvent(event)
+	if watchEvent == nil {
+		// Watcher is not interested in that object.
+		return
+	}
+
+	// We need to ensure that if we put event X to the c.result, all
+	// previous events were already put into it before, no matter whether
+	// c.done is closed or not.
+	// Thus we cannot simply select from c.done and c.result, as this
+	// would give us non-determinism.
+	// At the same time, we don't want to block infinitely on putting
+	// to c.result, when c.done is already closed.
+
+	// This ensures that with c.done already closed, we will at most once go
+	// into the next select after this. With that, no matter which
+	// statement we choose there, we will deliver only consecutive
+	// events.
+	select {
+	case <-c.done:
+		return
+	default:
+	}
+
+	select {
+	case c.result <- *watchEvent:
+	case <-c.done:
+	}
+}
+
+func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) {
+	defer utilruntime.HandleCrash()
+
+	// Check how long we are processing initEvents.
+	// As long as these are not processed, we are not processing
+	// any incoming events, so if it takes long, we may actually
+	// block all watchers for some time.
+	// TODO: From the logs it seems that there are processing
+	// times of even up to 1s, which is very long. However, this doesn't
+	// depend that much on the number of initEvents. E.g. from the
+	// 2000-node Kubemark run we have logs like this, e.g.:
+	// ... processing 13862 initEvents took 66.808689ms
+	// ... processing 14040 initEvents took 993.532539ms
+	// We should understand what is blocking us in those cases (e.g.
+	// is it lack of CPU, network, or something else) and potentially
+	// consider increasing the size of the result buffer in those cases.
+ const initProcessThreshold = 500 * time.Millisecond + startTime := time.Now() + for _, event := range initEvents { + c.sendWatchCacheEvent(event) + } + objType := c.objectType.String() + if len(initEvents) > 0 { + initCounter.WithLabelValues(objType).Add(float64(len(initEvents))) + } + processingTime := time.Since(startTime) + if processingTime > initProcessThreshold { + klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) + } + + defer close(c.result) + defer c.Stop() + for { + select { + case event, ok := <-c.input: + if !ok { + return + } + // only send events newer than resourceVersion + if event.ResourceVersion > resourceVersion { + c.sendWatchCacheEvent(event) + } + case <-ctx.Done(): + return + } + } +} + +type ready struct { + ok bool + c *sync.Cond +} + +func newReady() *ready { + return &ready{c: sync.NewCond(&sync.RWMutex{})} +} + +func (r *ready) wait() { + r.c.L.Lock() + for !r.ok { + r.c.Wait() + } + r.c.L.Unlock() +} + +// TODO: Make check() function more sophisticated, in particular +// allow it to behave as "waitWithTimeout". +func (r *ready) check() bool { + rwMutex := r.c.L.(*sync.RWMutex) + rwMutex.RLock() + defer rwMutex.RUnlock() + return r.ok +} + +func (r *ready) set(ok bool) { + r.c.L.Lock() + defer r.c.L.Unlock() + r.ok = ok + r.c.Broadcast() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go new file mode 100644 index 000000000..752a28714 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go @@ -0,0 +1,397 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime/debug" + "sync" + "sync/atomic" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" +) + +var _ runtime.CacheableObject = &cachingObject{} + +// metaRuntimeInterface implements runtime.Object and +// metav1.Object interfaces. +type metaRuntimeInterface interface { + runtime.Object + metav1.Object +} + +// serializationResult captures a result of serialization. +type serializationResult struct { + // once should be used to ensure serialization is computed once. + once sync.Once + + // raw is serialized object. + raw []byte + // err is error from serialization. + err error +} + +// serializationsCache is a type for caching serialization results. +type serializationsCache map[runtime.Identifier]*serializationResult + +// cachingObject is an object that is able to cache its serializations +// so that each of those is computed exactly once. +// +// cachingObject implements the metav1.Object interface (accessors for +// all metadata fields). However, setters for all fields except from +// SelfLink (which is set lately in the path) are ignored. 
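+//
+// Any setter that actually changes a field invalidates the cached
+// serializations (see conditionalSet and invalidateCacheLocked below).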
+type cachingObject struct {
+	lock sync.RWMutex
+
+	// Object for which serializations are cached.
+	object metaRuntimeInterface
+
+	// serializations is a cache containing object's serializations.
+	// The value stored in atomic.Value is of type serializationsCache.
+	// The atomic.Value type is used to allow fast-path reads.
+	serializations atomic.Value
+}
+
+// newCachingObject performs a deep copy of the given object and wraps it
+// into a cachingObject.
+// An error is returned if it's not possible to cast the object to
+// metav1.Object type.
+func newCachingObject(object runtime.Object) (*cachingObject, error) {
+	if obj, ok := object.(metaRuntimeInterface); ok {
+		result := &cachingObject{object: obj.DeepCopyObject().(metaRuntimeInterface)}
+		result.serializations.Store(make(serializationsCache))
+		return result, nil
+	}
+	return nil, fmt.Errorf("can't cast object to metav1.Object: %#v", object)
+}
+
+func (o *cachingObject) getSerializationResult(id runtime.Identifier) *serializationResult {
+	// Fast-path for getting from cache.
+	serializations := o.serializations.Load().(serializationsCache)
+	if result, exists := serializations[id]; exists {
+		return result
+	}
+
+	// Slow-path (that may require insert).
+	o.lock.Lock()
+	defer o.lock.Unlock()
+
+	serializations = o.serializations.Load().(serializationsCache)
+	// Check if it wasn't inserted in the meantime.
+	if result, exists := serializations[id]; exists {
+		return result
+	}
+
+	// Insert an entry for the given identifier. This requires a copy of the existing map.
+	newSerializations := make(serializationsCache)
+	for k, v := range serializations {
+		newSerializations[k] = v
+	}
+	result := &serializationResult{}
+	newSerializations[id] = result
+	o.serializations.Store(newSerializations)
+	return result
+}
+
+// CacheEncode implements runtime.CacheableObject interface.
+// It serializes the object and writes the result to the given io.Writer, trying
+// to first use the already cached result and falling back to the given encode
+// function in case of a cache miss.
+// It assumes that for a given identifier, the encode function always encodes
+// each input object into the same output format.
+func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.Object, io.Writer) error, w io.Writer) error {
+	result := o.getSerializationResult(id)
+	result.once.Do(func() {
+		buffer := bytes.NewBuffer(nil)
+		result.err = encode(o.GetObject(), buffer)
+		result.raw = buffer.Bytes()
+	})
+	// Once invoked, fields of serialization will not change.
+	if result.err != nil {
+		return result.err
+	}
+	_, err := w.Write(result.raw)
+	return err
+}
+
+// GetObject implements runtime.CacheableObject interface.
+// It returns a deep copy of the wrapped object to return ownership of it
+// to the caller, according to the contract of the interface.
+func (o *cachingObject) GetObject() runtime.Object {
+	o.lock.RLock()
+	defer o.lock.RUnlock()
+	return o.object.DeepCopyObject().(metaRuntimeInterface)
+}
+
+// GetObjectKind implements runtime.Object interface.
+func (o *cachingObject) GetObjectKind() schema.ObjectKind {
+	o.lock.RLock()
+	defer o.lock.RUnlock()
+	return o.object.GetObjectKind()
+}
+
+// DeepCopyObject implements runtime.Object interface.
+func (o *cachingObject) DeepCopyObject() runtime.Object {
+	// DeepCopyObject on cachingObject is not expected to be called anywhere.
+	// However, to be on the safe side, we implement it, though given the
+	// cache is only an optimization we ignore copying it.
+ result := &cachingObject{} + result.serializations.Store(make(serializationsCache)) + + o.lock.RLock() + defer o.lock.RUnlock() + result.object = o.object.DeepCopyObject().(metaRuntimeInterface) + return result +} + +var ( + invalidationCacheTimestampLock sync.Mutex + invalidationCacheTimestamp time.Time +) + +// shouldLogCacheInvalidation allows for logging cache-invalidation +// at most once per second (to avoid spamming logs in case of issues). +func shouldLogCacheInvalidation(now time.Time) bool { + invalidationCacheTimestampLock.Lock() + defer invalidationCacheTimestampLock.Unlock() + if invalidationCacheTimestamp.Add(time.Second).Before(now) { + invalidationCacheTimestamp = now + return true + } + return false +} + +func (o *cachingObject) invalidateCacheLocked() { + if cache, ok := o.serializations.Load().(serializationsCache); ok && len(cache) == 0 { + return + } + // We don't expect cache invalidation to happen - so we want + // to log the stacktrace to allow debugging if that will happen. + // OTOH, we don't want to spam logs with it. + // So we try to log it at most once per second. + if shouldLogCacheInvalidation(time.Now()) { + klog.Warningf("Unexpected cache invalidation for %#v\n%s", o.object, string(debug.Stack())) + } + o.serializations.Store(make(serializationsCache)) +} + +// The following functions implement metav1.Object interface: +// - getters simply delegate for the underlying object +// - setters check if operations isn't noop and if so, +// invalidate the cache and delegate for the underlying object + +func (o *cachingObject) conditionalSet(isNoop func() bool, set func()) { + if fastPath := func() bool { + o.lock.RLock() + defer o.lock.RUnlock() + return isNoop() + }(); fastPath { + return + } + o.lock.Lock() + defer o.lock.Unlock() + if isNoop() { + return + } + o.invalidateCacheLocked() + set() +} + +func (o *cachingObject) GetNamespace() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetNamespace() +} +func (o *cachingObject) SetNamespace(namespace string) { + o.conditionalSet( + func() bool { return o.object.GetNamespace() == namespace }, + func() { o.object.SetNamespace(namespace) }, + ) +} +func (o *cachingObject) GetName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetName() +} +func (o *cachingObject) SetName(name string) { + o.conditionalSet( + func() bool { return o.object.GetName() == name }, + func() { o.object.SetName(name) }, + ) +} +func (o *cachingObject) GetGenerateName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGenerateName() +} +func (o *cachingObject) SetGenerateName(name string) { + o.conditionalSet( + func() bool { return o.object.GetGenerateName() == name }, + func() { o.object.SetGenerateName(name) }, + ) +} +func (o *cachingObject) GetUID() types.UID { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetUID() +} +func (o *cachingObject) SetUID(uid types.UID) { + o.conditionalSet( + func() bool { return o.object.GetUID() == uid }, + func() { o.object.SetUID(uid) }, + ) +} +func (o *cachingObject) GetResourceVersion() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetResourceVersion() +} +func (o *cachingObject) SetResourceVersion(version string) { + o.conditionalSet( + func() bool { return o.object.GetResourceVersion() == version }, + func() { o.object.SetResourceVersion(version) }, + ) +} +func (o *cachingObject) GetGeneration() int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGeneration() +} +func (o 
*cachingObject) SetGeneration(generation int64) { + o.conditionalSet( + func() bool { return o.object.GetGeneration() == generation }, + func() { o.object.SetGeneration(generation) }, + ) +} +func (o *cachingObject) GetSelfLink() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetSelfLink() +} +func (o *cachingObject) SetSelfLink(selfLink string) { + o.conditionalSet( + func() bool { return o.object.GetSelfLink() == selfLink }, + func() { o.object.SetSelfLink(selfLink) }, + ) +} +func (o *cachingObject) GetCreationTimestamp() metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetCreationTimestamp() +} +func (o *cachingObject) SetCreationTimestamp(timestamp metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetCreationTimestamp() == timestamp }, + func() { o.object.SetCreationTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionTimestamp() *metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionTimestamp() +} +func (o *cachingObject) SetDeletionTimestamp(timestamp *metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetDeletionTimestamp() == timestamp }, + func() { o.object.SetDeletionTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionGracePeriodSeconds() *int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionGracePeriodSeconds() +} +func (o *cachingObject) SetDeletionGracePeriodSeconds(gracePeriodSeconds *int64) { + o.conditionalSet( + func() bool { return o.object.GetDeletionGracePeriodSeconds() == gracePeriodSeconds }, + func() { o.object.SetDeletionGracePeriodSeconds(gracePeriodSeconds) }, + ) +} +func (o *cachingObject) GetLabels() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetLabels() +} +func (o *cachingObject) SetLabels(labels map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetLabels(), labels) }, + func() { o.object.SetLabels(labels) }, + ) +} +func (o *cachingObject) GetAnnotations() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetAnnotations() +} +func (o *cachingObject) SetAnnotations(annotations map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetAnnotations(), annotations) }, + func() { o.object.SetAnnotations(annotations) }, + ) +} +func (o *cachingObject) GetFinalizers() []string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetFinalizers() +} +func (o *cachingObject) SetFinalizers(finalizers []string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetFinalizers(), finalizers) }, + func() { o.object.SetFinalizers(finalizers) }, + ) +} +func (o *cachingObject) GetOwnerReferences() []metav1.OwnerReference { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetOwnerReferences() +} +func (o *cachingObject) SetOwnerReferences(references []metav1.OwnerReference) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetOwnerReferences(), references) }, + func() { o.object.SetOwnerReferences(references) }, + ) +} +func (o *cachingObject) GetClusterName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetClusterName() +} +func (o *cachingObject) SetClusterName(clusterName string) { + o.conditionalSet( + func() bool { return o.object.GetClusterName() == clusterName }, + func() { o.object.SetClusterName(clusterName) }, + ) +} +func (o *cachingObject) GetManagedFields() 
[]metav1.ManagedFieldsEntry { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetManagedFields() +} +func (o *cachingObject) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetManagedFields(), managedFields) }, + func() { o.object.SetManagedFields(managedFields) }, + ) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go new file mode 100644 index 000000000..19cd5da6a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go @@ -0,0 +1,74 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + initCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "apiserver_init_events_total", + Help: "Counter of init events processed in watchcache broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityIncreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_increase_total", + Help: "Total number of watch cache capacity increase events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityDecreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_decrease_total", + Help: "Total number of watch cache capacity decrease events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) +) + +func init() { + legacyregistry.MustRegister(initCounter) + legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal) + legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal) +} + +// recordsWatchCacheCapacityChange record watchCache capacity resize(increase or decrease) operations. +func recordsWatchCacheCapacityChange(objType string, old, new int) { + if old < new { + watchCacheCapacityIncreaseTotal.WithLabelValues(objType).Inc() + return + } + watchCacheCapacityDecreaseTotal.WithLabelValues(objType).Inc() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go new file mode 100644 index 000000000..2eb0fed32 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "sync" + "time" +) + +const ( + refreshPerSecond = 50 * time.Millisecond + maxBudget = 100 * time.Millisecond +) + +// timeBudget implements a budget of time that you can use and is +// periodically being refreshed. The pattern to use it is: +// budget := newTimeBudget(...) +// ... +// timeout := budget.takeAvailable() +// // Now you can spend at most timeout on doing stuff +// ... +// // If you didn't use all timeout, return what you didn't use +// budget.returnUnused() +// +// NOTE: It's not recommended to be used concurrently from multiple threads - +// if first user takes the whole timeout, the second one will get 0 timeout +// even though the first one may return something later. +type timeBudget interface { + takeAvailable() time.Duration + returnUnused(unused time.Duration) +} + +type timeBudgetImpl struct { + sync.Mutex + budget time.Duration + + refresh time.Duration + maxBudget time.Duration +} + +func newTimeBudget(stopCh <-chan struct{}) timeBudget { + result := &timeBudgetImpl{ + budget: time.Duration(0), + refresh: refreshPerSecond, + maxBudget: maxBudget, + } + go result.periodicallyRefresh(stopCh) + return result +} + +func (t *timeBudgetImpl) periodicallyRefresh(stopCh <-chan struct{}) { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + t.Lock() + if t.budget = t.budget + t.refresh; t.budget > t.maxBudget { + t.budget = t.maxBudget + } + t.Unlock() + case <-stopCh: + return + } + } +} + +func (t *timeBudgetImpl) takeAvailable() time.Duration { + t.Lock() + defer t.Unlock() + result := t.budget + t.budget = time.Duration(0) + return result +} + +func (t *timeBudgetImpl) returnUnused(unused time.Duration) { + t.Lock() + defer t.Unlock() + if unused < 0 { + // We used more than allowed. + return + } + if t.budget = t.budget + unused; t.budget > t.maxBudget { + t.budget = t.maxBudget + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go new file mode 100644 index 000000000..7943a93dc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cacher + +import ( + "strings" +) + +// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary +func hasPathPrefix(s, pathPrefix string) bool { + // Short circuit if s doesn't contain the prefix at all + if !strings.HasPrefix(s, pathPrefix) { + return false + } + + pathPrefixLength := len(pathPrefix) + + if len(s) == pathPrefixLength { + // Exact match + return true + } + if strings.HasSuffix(pathPrefix, "/") { + // pathPrefix already ensured a path segment boundary + return true + } + if s[pathPrefixLength:pathPrefixLength+1] == "/" { + // The next character in s is a path segment boundary + // Check this instead of normalizing pathPrefix to avoid allocating on every call + return true + } + return false +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go new file mode 100644 index 000000000..dafcb3996 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -0,0 +1,631 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "fmt" + "reflect" + "sort" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +const ( + // blockTimeout determines how long we're willing to block the request + // to wait for a given resource version to be propagated to cache, + // before terminating request and returning Timeout error with retry + // after suggestion. + blockTimeout = 3 * time.Second + + // resourceVersionTooHighRetrySeconds is the seconds before a operation should be retried by the client + // after receiving a 'too high resource version' error. + resourceVersionTooHighRetrySeconds = 1 + + // eventFreshDuration is time duration of events we want to keep. + // We set it to `defaultBookmarkFrequency` plus epsilon to maximize + // chances that last bookmark was sent within kept history, at the + // same time, minimizing the needed memory usage. + eventFreshDuration = 75 * time.Second + + // defaultLowerBoundCapacity is a default value for event cache capacity's lower bound. + // TODO: Figure out, to what value we can decreased it. + defaultLowerBoundCapacity = 100 + + // defaultUpperBoundCapacity should be able to keep eventFreshDuration of history. + defaultUpperBoundCapacity = 100 * 1024 +) + +// watchCacheEvent is a single "watch event" that is send to users of +// watchCache. 
Additionally to a typical "watch.Event" it contains +// the previous value of the object to enable proper filtering in the +// upper layers. +type watchCacheEvent struct { + Type watch.EventType + Object runtime.Object + ObjLabels labels.Set + ObjFields fields.Set + PrevObject runtime.Object + PrevObjLabels labels.Set + PrevObjFields fields.Set + Key string + ResourceVersion uint64 + RecordTime time.Time +} + +// Computing a key of an object is generally non-trivial (it performs +// e.g. validation underneath). Similarly computing object fields and +// labels. To avoid computing them multiple times (to serve the event +// in different List/Watch requests), in the underlying store we are +// keeping structs (key, object, labels, fields). +type storeElement struct { + Key string + Object runtime.Object + Labels labels.Set + Fields fields.Set +} + +func storeElementKey(obj interface{}) (string, error) { + elem, ok := obj.(*storeElement) + if !ok { + return "", fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Key, nil +} + +func storeElementObject(obj interface{}) (runtime.Object, error) { + elem, ok := obj.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Object, nil +} + +func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc { + return func(obj interface{}) (strings []string, e error) { + seo, err := storeElementObject(obj) + if err != nil { + return nil, err + } + return objIndexFunc(seo) + } +} + +func storeElementIndexers(indexers *cache.Indexers) cache.Indexers { + if indexers == nil { + return cache.Indexers{} + } + ret := cache.Indexers{} + for indexName, indexFunc := range *indexers { + ret[indexName] = storeElementIndexFunc(indexFunc) + } + return ret +} + +// watchCache implements a Store interface. +// However, it depends on the elements implementing runtime.Object interface. +// +// watchCache is a "sliding window" (with a limited capacity) of objects +// observed from a watch. +type watchCache struct { + sync.RWMutex + + // Condition on which lists are waiting for the fresh enough + // resource version. + cond *sync.Cond + + // Maximum size of history window. + capacity int + + // upper bound of capacity since event cache has a dynamic size. + upperBoundCapacity int + + // lower bound of capacity since event cache has a dynamic size. + lowerBoundCapacity int + + // keyFunc is used to get a key in the underlying storage for a given object. + keyFunc func(runtime.Object) (string, error) + + // getAttrsFunc is used to get labels and fields of an object. + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error) + + // cache is used a cyclic buffer - its first element (with the smallest + // resourceVersion) is defined by startIndex, its last element is defined + // by endIndex (if cache is full it will be startIndex + capacity). + // Both startIndex and endIndex can be greater than buffer capacity - + // you should always apply modulo capacity to get an index in cache array. + cache []*watchCacheEvent + startIndex int + endIndex int + + // store will effectively support LIST operation from the "end of cache + // history" i.e. from the moment just after the newest cached watched event. + // It is necessary to effectively allow clients to start watching at now. + // NOTE: We assume that is thread-safe. + store cache.Indexer + + // ResourceVersion up to which the watchCache is propagated. + resourceVersion uint64 + + // ResourceVersion of the last list result (populated via Replace() method). 
+ listResourceVersion uint64 + + // This handler is run at the end of every successful Replace() method. + onReplace func() + + // This handler is run at the end of every Add/Update/Delete method + // and additionally gets the previous value of the object. + eventHandler func(*watchCacheEvent) + + // for testing timeouts. + clock clock.Clock + + // An underlying storage.Versioner. + versioner storage.Versioner + + // cacher's objectType. + objectType reflect.Type +} + +func newWatchCache( + keyFunc func(runtime.Object) (string, error), + eventHandler func(*watchCacheEvent), + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error), + versioner storage.Versioner, + indexers *cache.Indexers, + clock clock.Clock, + objectType reflect.Type) *watchCache { + wc := &watchCache{ + capacity: defaultLowerBoundCapacity, + keyFunc: keyFunc, + getAttrsFunc: getAttrsFunc, + cache: make([]*watchCacheEvent, defaultLowerBoundCapacity), + lowerBoundCapacity: defaultLowerBoundCapacity, + upperBoundCapacity: defaultUpperBoundCapacity, + startIndex: 0, + endIndex: 0, + store: cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)), + resourceVersion: 0, + listResourceVersion: 0, + eventHandler: eventHandler, + clock: clock, + versioner: versioner, + objectType: objectType, + } + wc.cond = sync.NewCond(wc.RLocker()) + return wc +} + +// Add takes runtime.Object as an argument. +func (w *watchCache) Add(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Added, Object: object} + + f := func(elem *storeElement) error { return w.store.Add(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Update takes runtime.Object as an argument. +func (w *watchCache) Update(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Modified, Object: object} + + f := func(elem *storeElement) error { return w.store.Update(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Delete takes runtime.Object as an argument. +func (w *watchCache) Delete(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Deleted, Object: object} + + f := func(elem *storeElement) error { return w.store.Delete(elem) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + resourceVersion, err := w.versioner.ObjectResourceVersion(object) + if err != nil { + return nil, 0, err + } + return object, resourceVersion, nil +} + +// processEvent is safe as long as there is at most one call to it in flight +// at any point in time. 
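+// In practice this holds because the reflector invokes Add/Update/Delete
+// synchronously from its main thread.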
+func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error { + key, err := w.keyFunc(event.Object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + elem := &storeElement{Key: key, Object: event.Object} + elem.Labels, elem.Fields, err = w.getAttrsFunc(event.Object) + if err != nil { + return err + } + + wcEvent := &watchCacheEvent{ + Type: event.Type, + Object: elem.Object, + ObjLabels: elem.Labels, + ObjFields: elem.Fields, + Key: key, + ResourceVersion: resourceVersion, + RecordTime: w.clock.Now(), + } + + if err := func() error { + // TODO: We should consider moving this lock below after the watchCacheEvent + // is created. In such situation, the only problematic scenario is Replace( + // happening after getting object from store and before acquiring a lock. + // Maybe introduce another lock for this purpose. + w.Lock() + defer w.Unlock() + + previous, exists, err := w.store.Get(elem) + if err != nil { + return err + } + if exists { + previousElem := previous.(*storeElement) + wcEvent.PrevObject = previousElem.Object + wcEvent.PrevObjLabels = previousElem.Labels + wcEvent.PrevObjFields = previousElem.Fields + } + + w.updateCache(wcEvent) + w.resourceVersion = resourceVersion + defer w.cond.Broadcast() + + return updateFunc(elem) + }(); err != nil { + return err + } + + // Avoid calling event handler under lock. + // This is safe as long as there is at most one call to Add/Update/Delete and + // UpdateResourceVersion in flight at any point in time, which is true now, + // because reflector calls them synchronously from its main thread. + if w.eventHandler != nil { + w.eventHandler(wcEvent) + } + return nil +} + +// Assumes that lock is already held for write. +func (w *watchCache) updateCache(event *watchCacheEvent) { + w.resizeCacheLocked(event.RecordTime) + if w.isCacheFullLocked() { + // Cache is full - remove the oldest element. + w.startIndex++ + } + w.cache[w.endIndex%w.capacity] = event + w.endIndex++ +} + +// resizeCacheLocked resizes the cache if necessary: +// - increases capacity by 2x if cache is full and all cached events occurred within last eventFreshDuration. +// - decreases capacity by 2x when recent quarter of events occurred outside of eventFreshDuration(protect watchCache from flapping). +func (w *watchCache) resizeCacheLocked(eventTime time.Time) { + if w.isCacheFullLocked() && eventTime.Sub(w.cache[w.startIndex%w.capacity].RecordTime) < eventFreshDuration { + capacity := min(w.capacity*2, w.upperBoundCapacity) + if capacity > w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } + if w.isCacheFullLocked() && eventTime.Sub(w.cache[(w.endIndex-w.capacity/4)%w.capacity].RecordTime) > eventFreshDuration { + capacity := max(w.capacity/2, w.lowerBoundCapacity) + if capacity < w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } +} + +// isCacheFullLocked used to judge whether watchCacheEvent is full. +// Assumes that lock is already held for write. +func (w *watchCache) isCacheFullLocked() bool { + return w.endIndex == w.startIndex+w.capacity +} + +// doCacheResizeLocked resize watchCache's event array with different capacity. +// Assumes that lock is already held for write. +func (w *watchCache) doCacheResizeLocked(capacity int) { + newCache := make([]*watchCacheEvent, capacity) + if capacity < w.capacity { + // adjust startIndex if cache capacity shrink. 
+		w.startIndex = w.endIndex - capacity
+	}
+	for i := w.startIndex; i < w.endIndex; i++ {
+		newCache[i%capacity] = w.cache[i%w.capacity]
+	}
+	w.cache = newCache
+	recordsWatchCacheCapacityChange(w.objectType.String(), w.capacity, capacity)
+	w.capacity = capacity
+}
+
+func (w *watchCache) UpdateResourceVersion(resourceVersion string) {
+	rv, err := w.versioner.ParseResourceVersion(resourceVersion)
+	if err != nil {
+		klog.Errorf("Couldn't parse resourceVersion: %v", err)
+		return
+	}
+
+	func() {
+		w.Lock()
+		defer w.Unlock()
+		w.resourceVersion = rv
+	}()
+
+	// Avoid calling event handler under lock.
+	// This is safe as long as there is at most one call to Add/Update/Delete and
+	// UpdateResourceVersion in flight at any point in time, which is true now,
+	// because reflector calls them synchronously from its main thread.
+	if w.eventHandler != nil {
+		wcEvent := &watchCacheEvent{
+			Type:            watch.Bookmark,
+			ResourceVersion: rv,
+		}
+		w.eventHandler(wcEvent)
+	}
+}
+
+// List returns a list of pointers to objects.
+func (w *watchCache) List() []interface{} {
+	return w.store.List()
+}
+
+// waitUntilFreshAndBlock waits until cache is at least as fresh as the given resourceVersion.
+// NOTE: This function acquires a lock and doesn't release it.
+// You HAVE TO explicitly call w.RUnlock() after this function.
+func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utiltrace.Trace) error {
+	startTime := w.clock.Now()
+	go func() {
+		// Wake us up when the time limit has expired. The docs
+		// promise that time.After (well, NewTimer, which it calls)
+		// will wait *at least* the duration given. Since this go
+		// routine starts sometime after we record the start time, and
+		// it will wake up the loop below sometime after the broadcast,
+		// we don't need to worry about waking it up before the time
+		// has expired accidentally.
+		<-w.clock.After(blockTimeout)
+		w.cond.Broadcast()
+	}()
+
+	w.RLock()
+	if trace != nil {
+		trace.Step("watchCache lock acquired")
+	}
+	for w.resourceVersion < resourceVersion {
+		if w.clock.Since(startTime) >= blockTimeout {
+			// Request that the client retry after 'resourceVersionTooHighRetrySeconds' seconds.
+			return storage.NewTooLargeResourceVersionError(resourceVersion, w.resourceVersion, resourceVersionTooHighRetrySeconds)
+		}
+		w.cond.Wait()
+	}
+	if trace != nil {
+		trace.Step("watchCache fresh enough")
+	}
+	return nil
+}
+
+// WaitUntilFreshAndList returns a list of pointers to objects.
+func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, error) {
+	err := w.waitUntilFreshAndBlock(resourceVersion, trace)
+	defer w.RUnlock()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only
+	// requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we
+	// want - they will be filtered out later. The fact that we return fewer things is only a further performance improvement.
+	// TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible.
+	for _, matchValue := range matchValues {
+		if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil {
+			return result, w.resourceVersion, nil
+		}
+	}
+	return w.store.List(), w.resourceVersion, nil
+}
+
+// WaitUntilFreshAndGet returns a pointer to a storeElement object.
+func (w *watchCache) WaitUntilFreshAndGet(resourceVersion uint64, key string, trace *utiltrace.Trace) (interface{}, bool, uint64, error) { + err := w.waitUntilFreshAndBlock(resourceVersion, trace) + defer w.RUnlock() + if err != nil { + return nil, false, 0, err + } + value, exists, err := w.store.GetByKey(key) + return value, exists, w.resourceVersion, err +} + +func (w *watchCache) ListKeys() []string { + return w.store.ListKeys() +} + +// Get takes runtime.Object as a parameter. However, it returns +// pointer to . +func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, false, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return nil, false, fmt.Errorf("couldn't compute key: %v", err) + } + + return w.store.Get(&storeElement{Key: key, Object: object}) +} + +// GetByKey returns pointer to . +func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { + return w.store.GetByKey(key) +} + +// Replace takes slice of runtime.Object as a parameter. +func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { + version, err := w.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + toReplace := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + object, ok := obj.(runtime.Object) + if !ok { + return fmt.Errorf("didn't get runtime.Object for replace: %#v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + objLabels, objFields, err := w.getAttrsFunc(object) + if err != nil { + return err + } + toReplace = append(toReplace, &storeElement{ + Key: key, + Object: object, + Labels: objLabels, + Fields: objFields, + }) + } + + w.Lock() + defer w.Unlock() + + w.startIndex = 0 + w.endIndex = 0 + if err := w.store.Replace(toReplace, resourceVersion); err != nil { + return err + } + w.listResourceVersion = version + w.resourceVersion = version + if w.onReplace != nil { + w.onReplace() + } + w.cond.Broadcast() + klog.V(3).Infof("Replace watchCache (rev: %v) ", resourceVersion) + return nil +} + +func (w *watchCache) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) { + size := w.endIndex - w.startIndex + var oldest uint64 + switch { + case w.listResourceVersion > 0 && w.startIndex == 0: + // If no event was removed from the buffer since last relist, the oldest watch + // event we can deliver is one greater than the resource version of the list. + oldest = w.listResourceVersion + 1 + case size > 0: + // If the previous condition is not satisfied: either some event was already + // removed from the buffer or we've never completed a list (the latter can + // only happen in unit tests that populate the buffer without performing + // list/replace operations), the oldest watch event we can deliver is the first + // one in the buffer. + oldest = w.cache[w.startIndex%w.capacity].ResourceVersion + default: + return nil, fmt.Errorf("watch cache isn't correctly initialized") + } + + if resourceVersion == 0 { + // resourceVersion = 0 means that we don't require any specific starting point + // and we would like to start watching from ~now. 
+ // However, to keep backward compatibility, we additionally need to return the + // current state and only then start watching from that point. + // + // TODO: In v2 api, we should stop returning the current state - #13969. + allItems := w.store.List() + result := make([]*watchCacheEvent, len(allItems)) + for i, item := range allItems { + elem, ok := item.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", elem) + } + objLabels, objFields, err := w.getAttrsFunc(elem.Object) + if err != nil { + return nil, err + } + result[i] = &watchCacheEvent{ + Type: watch.Added, + Object: elem.Object, + ObjLabels: objLabels, + ObjFields: objFields, + Key: elem.Key, + ResourceVersion: w.resourceVersion, + } + } + return result, nil + } + if resourceVersion < oldest-1 { + return nil, errors.NewResourceExpired(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) + } + + // Binary search the smallest index at which resourceVersion is greater than the given one. + f := func(i int) bool { + return w.cache[(w.startIndex+i)%w.capacity].ResourceVersion > resourceVersion + } + first := sort.Search(size, f) + result := make([]*watchCacheEvent, size-first) + for i := 0; i < size-first; i++ { + result[i] = w.cache[(w.startIndex+first+i)%w.capacity] + } + return result, nil +} + +func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]*watchCacheEvent, error) { + w.RLock() + defer w.RUnlock() + return w.GetAllEventsSinceThreadUnsafe(resourceVersion) +} + +func (w *watchCache) Resync() error { + // Nothing to do + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/doc.go b/vendor/k8s.io/apiserver/pkg/storage/doc.go new file mode 100644 index 000000000..fbdd94468 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Interfaces for database-related operations. +package storage // import "k8s.io/apiserver/pkg/storage" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go new file mode 100644 index 000000000..9c72d59fb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -0,0 +1,195 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const ( + ErrCodeKeyNotFound int = iota + 1 + ErrCodeKeyExists + ErrCodeResourceVersionConflicts + ErrCodeInvalidObj + ErrCodeUnreachable +) + +var errCodeToMessage = map[int]string{ + ErrCodeKeyNotFound: "key not found", + ErrCodeKeyExists: "key exists", + ErrCodeResourceVersionConflicts: "resource version conflicts", + ErrCodeInvalidObj: "invalid object", + ErrCodeUnreachable: "server unreachable", +} + +func NewKeyNotFoundError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyNotFound, + Key: key, + ResourceVersion: rv, + } +} + +func NewKeyExistsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyExists, + Key: key, + ResourceVersion: rv, + } +} + +func NewResourceVersionConflictsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeResourceVersionConflicts, + Key: key, + ResourceVersion: rv, + } +} + +func NewUnreachableError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeUnreachable, + Key: key, + ResourceVersion: rv, + } +} + +func NewInvalidObjError(key, msg string) *StorageError { + return &StorageError{ + Code: ErrCodeInvalidObj, + Key: key, + AdditionalErrorMsg: msg, + } +} + +type StorageError struct { + Code int + Key string + ResourceVersion int64 + AdditionalErrorMsg string +} + +func (e *StorageError) Error() string { + return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", + errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) +} + +// IsNotFound returns true if and only if err is "key" not found error. +func IsNotFound(err error) bool { + return isErrCode(err, ErrCodeKeyNotFound) +} + +// IsNodeExist returns true if and only if err is an node already exist error. +func IsNodeExist(err error) bool { + return isErrCode(err, ErrCodeKeyExists) +} + +// IsUnreachable returns true if and only if err indicates the server could not be reached. +func IsUnreachable(err error) bool { + return isErrCode(err, ErrCodeUnreachable) +} + +// IsConflict returns true if and only if err is a write conflict. +func IsConflict(err error) bool { + return isErrCode(err, ErrCodeResourceVersionConflicts) +} + +// IsInvalidObj returns true if and only if err is invalid error +func IsInvalidObj(err error) bool { + return isErrCode(err, ErrCodeInvalidObj) +} + +func isErrCode(err error, code int) bool { + if err == nil { + return false + } + if e, ok := err.(*StorageError); ok { + return e.Code == code + } + return false +} + +// InvalidError is generated when an error caused by invalid API object occurs +// in the storage package. +type InvalidError struct { + Errs field.ErrorList +} + +func (e InvalidError) Error() string { + return e.Errs.ToAggregate().Error() +} + +// IsInvalidError returns true if and only if err is an InvalidError. +func IsInvalidError(err error) bool { + _, ok := err.(InvalidError) + return ok +} + +func NewInvalidError(errors field.ErrorList) InvalidError { + return InvalidError{errors} +} + +// InternalError is generated when an error occurs in the storage package, i.e., +// not from the underlying storage backend (e.g., etcd). 
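Callers are expected to classify failures with the Is* predicates above rather than by matching error strings. A small hypothetical usage sketch, assuming the vendored import path k8s.io/apiserver/pkg/storage; the key is illustrative:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage"
)

// lookup stands in for a storage backend call that did not find the key.
func lookup(key string) error {
	return storage.NewKeyNotFoundError(key, 0)
}

func main() {
	err := lookup("/registry/routes/default/example")
	switch {
	case storage.IsNotFound(err):
		fmt.Println("not found:", err)
	case storage.IsConflict(err):
		fmt.Println("conflict:", err)
	default:
		fmt.Println("other error:", err)
	}
}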
+type InternalError struct { + Reason string +} + +func (e InternalError) Error() string { + return e.Reason +} + +// IsInternalError returns true if and only if err is an InternalError. +func IsInternalError(err error) bool { + _, ok := err.(InternalError) + return ok +} + +func NewInternalError(reason string) InternalError { + return InternalError{reason} +} + +func NewInternalErrorf(format string, a ...interface{}) InternalError { + return InternalError{fmt.Sprintf(format, a...)} +} + +var tooLargeResourceVersionCauseMsg = "Too large resource version" + +// NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for +// a minimum resource version that is larger than the largest currently available resource version for a requested resource. +func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { + err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err.ErrStatus.Details.Causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeResourceVersionTooLarge, + Message: tooLargeResourceVersionCauseMsg, + }, + } + return err +} + +// IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. +func IsTooLargeResourceVersion(err error) bool { + if !errors.IsTimeout(err) { + return false + } + return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go new file mode 100644 index 000000000..e251b6168 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage provides conversion of storage errors to API errors. +package storage // import "k8s.io/apiserver/pkg/storage/errors" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go new file mode 100644 index 000000000..fd3b35ed0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage" +) + +// InterpretListError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretListError(err error, qualifiedResource schema.GroupResource) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, "") + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "list", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretGetError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretGetError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "get", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretCreateError converts a generic error on a create +// operation into the appropriate API error. +func InterpretCreateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNodeExist(err): + return errors.NewAlreadyExists(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "create", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretUpdateError converts a generic error on an update +// operation into the appropriate API error. +func InterpretUpdateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "update", 2) // TODO: make configurable or handled at a higher level + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretDeleteError converts a generic error on a delete +// operation into the appropriate API error. +func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretWatchError converts a generic error on a watch +// operation into the appropriate API error. 
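A registry layer typically routes raw storage errors through these Interpret* helpers so that clients see ordinary API errors (NotFound, AlreadyExists, timeouts) instead of backend details. A hedged sketch of that pattern; the import alias, group/resource, and keys are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage"
	storeerr "k8s.io/apiserver/pkg/storage/errors"
)

func main() {
	routes := schema.GroupResource{Group: "route.openshift.io", Resource: "routes"}

	// A missing key from the backend becomes a standard NotFound API error.
	raw := storage.NewKeyNotFoundError("/registry/routes/default/example", 0)
	fmt.Println(storeerr.InterpretGetError(raw, routes, "example"))

	// An existing key on create becomes an AlreadyExists API error.
	raw = storage.NewKeyExistsError("/registry/routes/default/example", 0)
	fmt.Println(storeerr.InterpretCreateError(raw, routes, "example"))
}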
+func InterpretWatchError(err error, resource schema.GroupResource, name string) error { + switch { + case storage.IsInvalidError(err): + invalidError, _ := err.(storage.InvalidError) + return errors.NewInvalid(schema.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS new file mode 100644 index 000000000..84666835d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- wojtek-t +- timothysc +- madhusudancs +- hongchaodeng diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go new file mode 100644 index 000000000..c42fc6e08 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/api_object_versioner.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/storage" +) + +// APIObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. +type APIObjectVersioner struct{} + +// UpdateObject implements Versioner +func (a APIObjectVersioner) UpdateObject(obj runtime.Object, resourceVersion uint64) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + versionString := "" + if resourceVersion != 0 { + versionString = strconv.FormatUint(resourceVersion, 10) + } + accessor.SetResourceVersion(versionString) + return nil +} + +// UpdateList implements Versioner +func (a APIObjectVersioner) UpdateList(obj runtime.Object, resourceVersion uint64, nextKey string, count *int64) error { + if resourceVersion == 0 { + return fmt.Errorf("illegal resource version from storage: %d", resourceVersion) + } + listAccessor, err := meta.ListAccessor(obj) + if err != nil || listAccessor == nil { + return err + } + versionString := strconv.FormatUint(resourceVersion, 10) + listAccessor.SetResourceVersion(versionString) + listAccessor.SetContinue(nextKey) + listAccessor.SetRemainingItemCount(count) + return nil +} + +// PrepareObjectForStorage clears resource version and self link prior to writing to etcd. 
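UpdateObject above stamps the numeric etcd revision into metadata.resourceVersion, clearing it when the revision is 0. A small sketch using an unstructured object, since anything with accessible ObjectMeta works; the object content is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	v := etcd3.APIObjectVersioner{}
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
	}}

	// Stamp the etcd revision onto the object's metadata.resourceVersion.
	if err := v.UpdateObject(obj, 42); err != nil {
		panic(err)
	}
	fmt.Println(obj.GetResourceVersion()) // "42"
}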
+func (a APIObjectVersioner) PrepareObjectForStorage(obj runtime.Object) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion("") + accessor.SetSelfLink("") + return nil +} + +// ObjectResourceVersion implements Versioner +func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// ParseResourceVersion takes a resource version argument and converts it to +// the etcd version. For watch we should pass to helper.Watch(). Because resourceVersion is +// an opaque value, the default watch behavior for non-zero watch is to watch +// the next value (if you pass "1", you will see updates from "2" onwards). +func (a APIObjectVersioner) ParseResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" || resourceVersion == "0" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + return 0, storage.NewInvalidError(field.ErrorList{ + // Validation errors are supposed to return version-specific field + // paths, but this is probably close enough. + field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), + }) + } + return version, nil +} + +// Versioner implements Versioner +var Versioner storage.Versioner = APIObjectVersioner{} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a APIObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := Versioner.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := Versioner.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go new file mode 100644 index 000000000..1f97a5a77 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "strconv" + "sync" + "time" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +const ( + compactRevKey = "compact_rev_key" +) + +var ( + endpointsMapMu sync.Mutex + endpointsMap map[string]struct{} +) + +func init() { + endpointsMap = make(map[string]struct{}) +} + +// StartCompactor starts a compactor in the background to compact old version of keys that's not needed. +// By default, we save the most recent 10 minutes data and compact versions > 10minutes ago. 
+// It should be enough for slow watchers and to tolerate burst. +// TODO: We might keep a longer history (12h) in the future once storage API can take advantage of past version of keys. +func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterval time.Duration) { + endpointsMapMu.Lock() + defer endpointsMapMu.Unlock() + + // In one process, we can have only one compactor for one cluster. + // Currently we rely on endpoints to differentiate clusters. + for _, ep := range client.Endpoints() { + if _, ok := endpointsMap[ep]; ok { + klog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) + return + } + } + for _, ep := range client.Endpoints() { + endpointsMap[ep] = struct{}{} + } + + if compactInterval != 0 { + go compactor(ctx, client, compactInterval) + } +} + +// compactor periodically compacts historical versions of keys in etcd. +// It will compact keys with versions older than given interval. +// In other words, after compaction, it will only contain keys set during last interval. +// Any API call for the older versions of keys will return error. +// Interval is the time interval between each compaction. The first compaction happens after "interval". +func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) { + // Technical definitions: + // We have a special key in etcd defined as *compactRevKey*. + // compactRevKey's value will be set to the string of last compacted revision. + // compactRevKey's version will be used as logical time for comparison. THe version is referred as compact time. + // Initially, because the key doesn't exist, the compact time (version) is 0. + // + // Algorithm: + // - Compare to see if (local compact_time) = (remote compact_time). + // - If yes, increment both local and remote compact_time, and do a compaction. + // - If not, set local to remote compact_time. + // + // Technical details/insights: + // + // The protocol here is lease based. If one compactor CAS successfully, the others would know it when they fail in + // CAS later and would try again in 10 minutes. If an APIServer crashed, another one would "take over" the lease. + // + // For example, in the following diagram, we have a compactor C1 doing compaction in t1, t2. Another compactor C2 + // at t1' (t1 < t1' < t2) would CAS fail, set its known oldRev to rev at t1', and try again in t2' (t2' > t2). + // If C1 crashed and wouldn't compact at t2, C2 would CAS successfully at t2'. + // + // oldRev(t2) curRev(t2) + // + + // oldRev curRev | + // + + | + // | | | + // | | t1' | t2' + // +---v-------------v----^---------v------^----> + // t0 t1 t2 + // + // We have the guarantees: + // - in normal cases, the interval is 10 minutes. + // - in failover, the interval is >10m and <20m + // + // FAQ: + // - What if time is not accurate? We don't care as long as someone did the compaction. Atomicity is ensured using + // etcd API. + // - What happened under heavy load scenarios? Initially, each apiserver will do only one compaction + // every 10 minutes. This is very unlikely affecting or affected w.r.t. server load. + + var compactTime int64 + var rev int64 + var err error + for { + select { + case <-time.After(interval): + case <-ctx.Done(): + return + } + + compactTime, rev, err = compact(ctx, client, compactTime, rev) + if err != nil { + klog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) + continue + } + } +} + +// compact compacts etcd store and returns current rev. 
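Wiring the compactor up takes only a client and an interval; the CAS protocol described above and the per-endpoint de-duplication are internal to StartCompactor. A hedged sketch of the call site, with an illustrative endpoint and the 10-minute window the comment mentions:

package main

import (
	"context"
	"time"

	"go.etcd.io/etcd/clientv3"
	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Compact history older than the interval; repeated calls for the same
	// endpoints are de-duplicated by StartCompactor itself.
	etcd3.StartCompactor(ctx, cli, 10*time.Minute)

	// ... run the server; cancel() stops the compactor goroutine on shutdown.
}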
+// It will return the current compact time and global revision if no error occurred. +// Note that CAS fail will not incur any error. +func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, int64, error) { + resp, err := client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.Version(compactRevKey), "=", t), + ).Then( + clientv3.OpPut(compactRevKey, strconv.FormatInt(rev, 10)), // Expect side effect: increment Version + ).Else( + clientv3.OpGet(compactRevKey), + ).Commit() + if err != nil { + return t, rev, err + } + + curRev := resp.Header.Revision + + if !resp.Succeeded { + curTime := resp.Responses[0].GetResponseRange().Kvs[0].Version + return curTime, curRev, nil + } + curTime := t + 1 + + if rev == 0 { + // We don't compact on bootstrap. + return curTime, curRev, nil + } + if _, err = client.Compact(ctx, rev); err != nil { + return curTime, curRev, err + } + klog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) + return curTime, curRev, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go new file mode 100644 index 000000000..b33751480 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + + etcdrpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +func interpretWatchError(err error) error { + switch { + case err == etcdrpc.ErrCompacted: + return errors.NewResourceExpired("The resourceVersion for the provided watch is too old.") + } + return err +} + +const ( + expired string = "The resourceVersion for the provided list is too old." + continueExpired string = "The provided continue parameter is too old " + + "to display a consistent list result. You can start a new list without " + + "the continue parameter." + inconsistentContinue string = "The provided continue parameter is too old " + + "to display a consistent list result. You can start a new list without " + + "the continue parameter, or use the continue token in this response to " + + "retrieve the remainder of the results. Continuing with the provided " + + "token results in an inconsistent list - objects that were created, " + + "modified, or deleted between the time the first chunk was returned " + + "and now may show up in the list." +) + +func interpretListError(err error, paging bool, continueKey, keyPrefix string) error { + switch { + case err == etcdrpc.ErrCompacted: + if paging { + return handleCompactedErrorForPaging(continueKey, keyPrefix) + } + return errors.NewResourceExpired(expired) + } + return err +} + +func handleCompactedErrorForPaging(continueKey, keyPrefix string) error { + // continueToken.ResoureVersion=-1 means that the apiserver can + // continue the list at the latest resource version. 
We don't use rv=0 + // for this purpose to distinguish from a bad token that has empty rv. + newToken, err := encodeContinue(continueKey, keyPrefix, -1) + if err != nil { + utilruntime.HandleError(err) + return errors.NewResourceExpired(continueExpired) + } + statusError := errors.NewResourceExpired(inconsistentContinue) + statusError.ErrStatus.ListMeta.Continue = newToken + return statusError +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go new file mode 100644 index 000000000..83e52c064 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" +) + +type event struct { + key string + value []byte + prevValue []byte + rev int64 + isDeleted bool + isCreated bool + isProgressNotify bool +} + +// parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event. +func parseKV(kv *mvccpb.KeyValue) *event { + return &event{ + key: string(kv.Key), + value: kv.Value, + prevValue: nil, + rev: kv.ModRevision, + isDeleted: false, + isCreated: true, + } +} + +func parseEvent(e *clientv3.Event) (*event, error) { + if !e.IsCreate() && e.PrevKv == nil { + // If the previous value is nil, error. One example of how this is possible is if the previous value has been compacted already. + return nil, fmt.Errorf("etcd event received with PrevKv=nil (key=%q, modRevision=%d, type=%s)", string(e.Kv.Key), e.Kv.ModRevision, e.Type.String()) + + } + ret := &event{ + key: string(e.Kv.Key), + value: e.Kv.Value, + rev: e.Kv.ModRevision, + isDeleted: e.Type == clientv3.EventTypeDelete, + isCreated: e.IsCreate(), + } + if e.PrevKv != nil { + ret.prevValue = e.PrevKv.Value + } + return ret, nil +} + +func progressNotifyEvent(rev int64) *event { + return &event{ + rev: rev, + isProgressNotify: true, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go new file mode 100644 index 000000000..ad051d2d6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/healthcheck.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "encoding/json" + "fmt" +) + +// etcdHealth encodes data returned from etcd /healthz handler. 
+type etcdHealth struct { + // Note this has to be public so the json library can modify it. + Health string `json:"health"` +} + +// EtcdHealthCheck decodes data returned from etcd /healthz handler. +func EtcdHealthCheck(data []byte) error { + obj := etcdHealth{} + if err := json.Unmarshal(data, &obj); err != nil { + return err + } + if obj.Health != "true" { + return fmt.Errorf("Unhealthy status: %s", obj.Health) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go new file mode 100644 index 000000000..6b5a5700a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "sync" + "time" + + "go.etcd.io/etcd/clientv3" +) + +// leaseManager is used to manage leases requested from etcd. If a new write +// needs a lease that has similar expiration time to the previous one, the old +// lease will be reused to reduce the overhead of etcd, since lease operations +// are expensive. In the implementation, we only store one previous lease, +// since all the events have the same ttl. +type leaseManager struct { + client *clientv3.Client // etcd client used to grant leases + leaseMu sync.Mutex + prevLeaseID clientv3.LeaseID + prevLeaseExpirationTime time.Time + // The period of time in seconds and percent of TTL that each lease is + // reused. The minimum of them is used to avoid unreasonably large + // numbers. We use var instead of const for testing purposes. + leaseReuseDurationSeconds int64 + leaseReuseDurationPercent float64 +} + +// newDefaultLeaseManager creates a new lease manager using default setting. +func newDefaultLeaseManager(client *clientv3.Client) *leaseManager { + return newLeaseManager(client, 60, 0.05) +} + +// newLeaseManager creates a new lease manager with the number of buffered +// leases, lease reuse duration in seconds and percentage. The percentage +// value x means x*100%. +func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64) *leaseManager { + return &leaseManager{ + client: client, + leaseReuseDurationSeconds: leaseReuseDurationSeconds, + leaseReuseDurationPercent: leaseReuseDurationPercent, + } +} + +// setLeaseReuseDurationSeconds is used for testing purpose. It is used to +// reduce the extra lease duration to avoid unnecessary timeout in testing. +func (l *leaseManager) setLeaseReuseDurationSeconds(duration int64) { + l.leaseMu.Lock() + defer l.leaseMu.Unlock() + l.leaseReuseDurationSeconds = duration +} + +// GetLease returns a lease based on requested ttl: if the cached previous +// lease can be reused, reuse it; otherwise request a new one from etcd. 
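GetLease, just below, reuses the cached lease only when its expiry lands in a narrow window: late enough to outlive the requested ttl, but no more than the reuse duration beyond it. A standalone sketch of that window check with illustrative numbers (the reusable helper is mine, not part of the vendored code):

package main

import (
	"fmt"
	"time"
)

// reusable mirrors the valid && sufficient test in leaseManager.GetLease.
func reusable(now, prevExpiry time.Time, ttl, reuseSeconds int64) bool {
	valid := now.Add(time.Duration(ttl) * time.Second).Before(prevExpiry)
	sufficient := now.Add(time.Duration(ttl+reuseSeconds) * time.Second).After(prevExpiry)
	return valid && sufficient
}

func main() {
	now := time.Now()
	prev := now.Add(70 * time.Second) // cached lease expires in 70s

	fmt.Println(reusable(now, prev, 60, 60)) // true: 60s < 70s < 120s
	fmt.Println(reusable(now, prev, 90, 60)) // false: the cached lease would expire too early
}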
+func (l *leaseManager) GetLease(ctx context.Context, ttl int64) (clientv3.LeaseID, error) { + now := time.Now() + l.leaseMu.Lock() + defer l.leaseMu.Unlock() + // check if previous lease can be reused + reuseDurationSeconds := l.getReuseDurationSecondsLocked(ttl) + valid := now.Add(time.Duration(ttl) * time.Second).Before(l.prevLeaseExpirationTime) + sufficient := now.Add(time.Duration(ttl+reuseDurationSeconds) * time.Second).After(l.prevLeaseExpirationTime) + if valid && sufficient { + return l.prevLeaseID, nil + } + // request a lease with a little extra ttl from etcd + ttl += reuseDurationSeconds + lcr, err := l.client.Lease.Grant(ctx, ttl) + if err != nil { + return clientv3.LeaseID(0), err + } + // cache the new lease id + l.prevLeaseID = lcr.ID + l.prevLeaseExpirationTime = now.Add(time.Duration(ttl) * time.Second) + return lcr.ID, nil +} + +// getReuseDurationSecondsLocked returns the reusable duration in seconds +// based on the configuration. Lock has to be acquired before calling this +// function. +func (l *leaseManager) getReuseDurationSecondsLocked(ttl int64) int64 { + reuseDurationSeconds := int64(l.leaseReuseDurationPercent * float64(ttl)) + if reuseDurationSeconds > l.leaseReuseDurationSeconds { + reuseDurationSeconds = l.leaseReuseDurationSeconds + } + return reuseDurationSeconds +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go new file mode 100644 index 000000000..e8a73082c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "fmt" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +func init() { + clientv3.SetLogger(klogWrapper{}) +} + +type klogWrapper struct{} + +const klogWrapperDepth = 4 + +func (klogWrapper) Info(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Infoln(args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Infof(format string, args ...interface{}) { + klog.InfoDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Warning(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Warningln(args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Warningf(format string, args ...interface{}) { + klog.WarningDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Error(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, args...) +} + +func (klogWrapper) Errorln(args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Errorf(format string, args ...interface{}) { + klog.ErrorDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) Fatal(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, args...) 
+} + +func (klogWrapper) Fatalln(args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintln(args...)) +} + +func (klogWrapper) Fatalf(format string, args ...interface{}) { + klog.FatalDepth(klogWrapperDepth, fmt.Sprintf(format, args...)) +} + +func (klogWrapper) V(l int) bool { + return bool(klog.V(klog.Level(l)).Enabled()) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go new file mode 100644 index 000000000..1f001406a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -0,0 +1,115 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + etcdRequestLatency = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "etcd_request_duration_seconds", + Help: "Etcd request latency in seconds for each operation and object type.", + // Keeping it similar to the buckets used by the apiserver_request_duration_seconds metric so that + // api latency and etcd latency can be more comparable side by side. + Buckets: []float64{.005, .01, .025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, + 0.8, 0.9, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"operation", "type"}, + ) + objectCounts = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_object_counts", + Help: "Number of stored objects at the time of last check split by kind.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + dbTotalSize = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_db_total_size_in_bytes", + Help: "Total size of the etcd database file physically allocated in bytes.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"endpoint"}, + ) + etcdBookmarkCounts = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_bookmark_counts", + Help: "Number of etcd bookmarks (progress notify events) split by kind.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + // Register the metrics. 
+ registerMetrics.Do(func() { + legacyregistry.MustRegister(etcdRequestLatency) + legacyregistry.MustRegister(objectCounts) + legacyregistry.MustRegister(dbTotalSize) + legacyregistry.MustRegister(etcdBookmarkCounts) + }) +} + +// UpdateObjectCount sets the etcd_object_counts metric. +func UpdateObjectCount(resourcePrefix string, count int64) { + objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) +} + +// RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. +func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { + etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) +} + +// RecordEtcdBookmark updates the etcd_bookmark_counts metric. +func RecordEtcdBookmark(resource string) { + etcdBookmarkCounts.WithLabelValues(resource).Inc() +} + +// Reset resets the etcd_request_duration_seconds metric. +func Reset() { + etcdRequestLatency.Reset() +} + +// sinceInSeconds gets the time since the specified start in seconds. +func sinceInSeconds(start time.Time) float64 { + return time.Since(start).Seconds() +} + +// UpdateEtcdDbSize sets the etcd_db_total_size_in_bytes metric. +func UpdateEtcdDbSize(ep string, size int64) { + dbTotalSize.WithLabelValues(ep).Set(float64(size)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go new file mode 100644 index 000000000..0cff6b3fc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -0,0 +1,939 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "path" + "reflect" + "strings" + "time" + + "go.etcd.io/etcd/clientv3" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +// authenticatedDataString satisfies the value.Context interface. It uses the key to +// authenticate the stored data. This does not defend against reuse of previously +// encrypted values under the same key, but will prevent an attacker from using an +// encrypted value from a different key. A stronger authenticated data segment would +// include the etcd3 Version field (which is incremented on each write to a key and +// reset when the key is deleted), but an attacker with write access to etcd can +// force deletion and recreation of keys to weaken that angle. +type authenticatedDataString string + +// AuthenticatedData implements the value.Context interface. 
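The store code that follows wraps every etcd call in the same pattern: capture a start time, make the call, then observe the latency under an operation and type label. A minimal sketch of that pattern against the vendored metrics package; the labels are illustrative:

package main

import (
	"time"

	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
)

func main() {
	// Register the collectors once with the legacy registry.
	metrics.Register()

	startTime := time.Now()
	// ... issue the etcd request here ...
	metrics.RecordEtcdRequestLatency("get", "routes", startTime)
}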
+func (d authenticatedDataString) AuthenticatedData() []byte { + return []byte(string(d)) +} + +var _ value.Context = authenticatedDataString("") + +type store struct { + client *clientv3.Client + codec runtime.Codec + versioner storage.Versioner + transformer value.Transformer + pathPrefix string + watcher *watcher + pagingEnabled bool + leaseManager *leaseManager +} + +type objState struct { + obj runtime.Object + meta *storage.ResponseMeta + rev int64 + data []byte + stale bool +} + +// New returns an etcd3 implementation of storage.Interface. +func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface { + return newStore(c, newFunc, pagingEnabled, codec, prefix, transformer) +} + +func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store { + versioner := APIObjectVersioner{} + result := &store{ + client: c, + codec: codec, + versioner: versioner, + transformer: transformer, + pagingEnabled: pagingEnabled, + // for compatibility with etcd2 impl. + // no-op for default prefix of '/registry'. + // keeps compatibility with etcd2 impl for custom prefixes that don't start with '/' + pathPrefix: path.Join("/", prefix), + watcher: newWatcher(c, codec, newFunc, versioner, transformer), + leaseManager: newDefaultLeaseManager(c), + } + return result +} + +// Versioner implements storage.Interface.Versioner. +func (s *store) Versioner() storage.Versioner { + return s.versioner +} + +// Get implements storage.Interface.Get. +func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error { + key = path.Join(s.pathPrefix, key) + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return err + } + if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + + if len(getResp.Kvs) == 0 { + if opts.IgnoreNotFound { + return runtime.SetZeroValue(out) + } + return storage.NewKeyNotFoundError(key, 0) + } + kv := getResp.Kvs[0] + + data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + return decode(s.codec, s.versioner, data, out, kv.ModRevision) +} + +// Create implements storage.Interface.Create. 
+func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { + return errors.New("resourceVersion should not be set on objects to be created") + } + if err := s.versioner.PrepareObjectForStorage(obj); err != nil { + return fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + data, err := runtime.Encode(s.codec, obj) + if err != nil { + return err + } + key = path.Join(s.pathPrefix, key) + + opts, err := s.ttlOpts(ctx, int64(ttl)) + if err != nil { + return err + } + + newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + notFound(key), + ).Then( + clientv3.OpPut(key, string(newData), opts...), + ).Commit() + metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime) + if err != nil { + return err + } + if !txnResp.Succeeded { + return storage.NewKeyExistsError(key, 0) + } + + if out != nil { + putResp := txnResp.Responses[0].GetResponsePut() + return decode(s.codec, s.versioner, data, out, putResp.Header.Revision) + } + return nil +} + +// Delete implements storage.Interface.Delete. +func (s *store) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + v, err := conversion.EnforcePtr(out) + if err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + key = path.Join(s.pathPrefix, key) + return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion) +} + +func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc) error { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return err + } + for { + origState, err := s.getState(getResp, key, v, false) + if err != nil { + return err + } + if preconditions != nil { + if err := preconditions.Check(key, origState.obj); err != nil { + return err + } + } + if err := validateDeletion(ctx, origState.obj); err != nil { + return err + } + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev), + ).Then( + clientv3.OpDelete(key), + ).Else( + clientv3.OpGet(key), + ).Commit() + metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) + if err != nil { + return err + } + if !txnResp.Succeeded { + getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) + klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) + continue + } + return decode(s.codec, s.versioner, origState.data, out, origState.rev) + } +} + +// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate. 
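Create above reduces to an etcd transaction guarded by a compare that succeeds only while the key is absent. A minimal clientv3 sketch of that shape outside the vendored store (endpoint, key, and value are illustrative, and the real store additionally encodes the object, applies the value transformer, and records metrics):

package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	key := "/registry/example"
	resp, err := cli.KV.Txn(context.Background()).If(
		// CreateRevision == 0 means the key does not exist yet.
		clientv3.Compare(clientv3.CreateRevision(key), "=", 0),
	).Then(
		clientv3.OpPut(key, "value"),
	).Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", resp.Succeeded) // false means the key already existed
}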
+func (s *store) GuaranteedUpdate( + ctx context.Context, key string, out runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error { + trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)}) + defer trace.LogIfLong(500 * time.Millisecond) + + v, err := conversion.EnforcePtr(out) + if err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + key = path.Join(s.pathPrefix, key) + + getCurrentState := func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime) + if err != nil { + return nil, err + } + return s.getState(getResp, key, v, ignoreNotFound) + } + + var origState *objState + var mustCheckData bool + if suggestion != nil { + origState, err = s.getStateFromObject(suggestion) + if err != nil { + return err + } + mustCheckData = true + } else { + origState, err = getCurrentState() + if err != nil { + return err + } + } + trace.Step("initial value restored") + + transformContext := authenticatedDataString(key) + for { + if err := preconditions.Check(key, origState.obj); err != nil { + // If our data is already up to date, return the error + if !mustCheckData { + return err + } + + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue + } + + ret, ttl, err := s.updateState(origState, tryUpdate) + if err != nil { + // If our data is already up to date, return the error + if !mustCheckData { + return err + } + + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue + } + + data, err := runtime.Encode(s.codec, ret) + if err != nil { + return err + } + if !origState.stale && bytes.Equal(data, origState.data) { + // if we skipped the original Get in this loop, we must refresh from + // etcd in order to be sure the data in the store is equivalent to + // our desired serialization + if mustCheckData { + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + if !bytes.Equal(data, origState.data) { + // original data changed, restart loop + continue + } + } + // recheck that the data from etcd is not stale before short-circuiting a write + if !origState.stale { + return decode(s.codec, s.versioner, origState.data, out, origState.rev) + } + } + + newData, err := s.transformer.TransformToStorage(data, transformContext) + if err != nil { + return storage.NewInternalError(err.Error()) + } + + opts, err := s.ttlOpts(ctx, int64(ttl)) + if err != nil { + return err + } + trace.Step("Transaction prepared") + + startTime := time.Now() + txnResp, err := s.client.KV.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev), + ).Then( + clientv3.OpPut(key, string(newData), opts...), + ).Else( + clientv3.OpGet(key), + ).Commit() + metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime) + if err != nil { + return err + } + trace.Step("Transaction committed") + if !txnResp.Succeeded { + getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) + klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) + origState, err = s.getState(getResp, key, v, ignoreNotFound) + 
if err != nil { + return err + } + trace.Step("Retry value restored") + mustCheckData = false + continue + } + putResp := txnResp.Responses[0].GetResponsePut() + + return decode(s.codec, s.versioner, data, out, putResp.Header.Revision) + } +} + +// GetToList implements storage.Interface.GetToList. +func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := listOpts.ResourceVersion + match := listOpts.ResourceVersionMatch + pred := listOpts.Predicate + trace := utiltrace.New("GetToList etcd3", + utiltrace.Field{"key", key}, + utiltrace.Field{"resourceVersion", resourceVersion}, + utiltrace.Field{"resourceVersionMatch", match}, + utiltrace.Field{"limit", pred.Limit}, + utiltrace.Field{"continue", pred.Continue}) + defer trace.LogIfLong(500 * time.Millisecond) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + return fmt.Errorf("need ptr to slice: %v", err) + } + + newItemFunc := getNewItemFunc(listObj, v) + + key = path.Join(s.pathPrefix, key) + startTime := time.Now() + var opts []clientv3.OpOption + if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact { + rv, err := s.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + opts = append(opts, clientv3.WithRev(int64(rv))) + } + + getResp, err := s.client.KV.Get(ctx, key, opts...) + metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) + if err != nil { + return err + } + if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + + if len(getResp.Kvs) > 0 { + data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { + return err + } + } + // update version with cluster level revision + return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil) +} + +func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object { + // For unstructured lists with a target group/version, preserve the group/version in the instantiated list items + if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured { + if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 { + return func() runtime.Object { + return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}} + } + } + } + + // Otherwise just instantiate an empty item + elem := v.Type().Elem() + return func() runtime.Object { + return reflect.New(elem).Interface().(runtime.Object) + } +} + +func (s *store) Count(key string) (int64, error) { + key = path.Join(s.pathPrefix, key) + + // We need to make sure the key ended with "/" so that we only get children "directories". + // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, + // while with prefix "/a/" will return only "/a/b" which is the correct answer. 
+	if !strings.HasSuffix(key, "/") {
+		key += "/"
+	}
+
+	startTime := time.Now()
+	getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
+	metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
+	if err != nil {
+		return 0, err
+	}
+	return getResp.Count, nil
+}
+
+// continueToken is a simple structured object for encoding the state of a continue token.
+// TODO: if we change the version of the encoded form, we can't start encoding the new version
+// until all other servers are upgraded (i.e. we need to support rolling schema)
+// This is a public API struct and cannot change.
+type continueToken struct {
+	APIVersion      string `json:"v"`
+	ResourceVersion int64  `json:"rv"`
+	StartKey        string `json:"start"`
+}
+
+// decodeContinue transforms an encoded continue token into its start key and resource version.
+// TODO: return a typed error that instructs clients that they must relist
+func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
+	data, err := base64.RawURLEncoding.DecodeString(continueValue)
+	if err != nil {
+		return "", 0, fmt.Errorf("continue key is not valid: %v", err)
+	}
+	var c continueToken
+	if err := json.Unmarshal(data, &c); err != nil {
+		return "", 0, fmt.Errorf("continue key is not valid: %v", err)
+	}
+	switch c.APIVersion {
+	case "meta.k8s.io/v1":
+		if c.ResourceVersion == 0 {
+			return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
+		}
+		if len(c.StartKey) == 0 {
+			return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
+		}
+		// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
+		// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
+		// continue start key that is fully qualified and cannot range over anything less specific than
+		// keyPrefix.
+		key := c.StartKey
+		if !strings.HasPrefix(key, "/") {
+			key = "/" + key
+		}
+		cleaned := path.Clean(key)
+		if cleaned != key {
+			return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
+		}
+		return keyPrefix + cleaned[1:], c.ResourceVersion, nil
+	default:
+		return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
+	}
+}
+
+// encodeContinue returns a string representing the encoded continuation of the current query.
+func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
+	nextKey := strings.TrimPrefix(key, keyPrefix)
+	if nextKey == key {
+		return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
+	}
+	out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(out), nil
+}
+
+// List implements storage.Interface.List.
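+// The continue token carried in pred.Continue is the unpadded base64url encoding of a
+// small JSON document produced by encodeContinue above, for example (illustrative
+// values only): {"v":"meta.k8s.io/v1","rv":12345,"start":"ns1/pod-a\u0000"}.
+// decodeContinue rejects start keys that would escape keyPrefix after path.Clean.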
+func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + match := opts.ResourceVersionMatch + pred := opts.Predicate + trace := utiltrace.New("List etcd3", + utiltrace.Field{"key", key}, + utiltrace.Field{"resourceVersion", resourceVersion}, + utiltrace.Field{"resourceVersionMatch", match}, + utiltrace.Field{"limit", pred.Limit}, + utiltrace.Field{"continue", pred.Continue}) + defer trace.LogIfLong(500 * time.Millisecond) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + return fmt.Errorf("need ptr to slice: %v", err) + } + + if s.pathPrefix != "" { + key = path.Join(s.pathPrefix, key) + } + // We need to make sure the key ended with "/" so that we only get children "directories". + // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, + // while with prefix "/a/" will return only "/a/b" which is the correct answer. + if !strings.HasSuffix(key, "/") { + key += "/" + } + keyPrefix := key + + // set the appropriate clientv3 options to filter the returned data set + var paging bool + options := make([]clientv3.OpOption, 0, 4) + if s.pagingEnabled && pred.Limit > 0 { + paging = true + options = append(options, clientv3.WithLimit(pred.Limit)) + } + + newItemFunc := getNewItemFunc(listObj, v) + + var fromRV *uint64 + if len(resourceVersion) > 0 { + parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + fromRV = &parsedRV + } + + var returnedRV, continueRV, withRev int64 + var continueKey string + switch { + case s.pagingEnabled && len(pred.Continue) > 0: + continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) + } + + if len(resourceVersion) > 0 && resourceVersion != "0" { + return apierrors.NewBadRequest("specifying resource version is not allowed when using continue") + } + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) + key = continueKey + + // If continueRV > 0, the LIST request needs a specific resource version. + // continueRV==0 is invalid. + // If continueRV < 0, the request is for the latest resource version. + if continueRV > 0 { + withRev = continueRV + returnedRV = continueRV + } + case s.pagingEnabled && pred.Limit > 0: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. + case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + if *fromRV > 0 { + returnedRV = int64(*fromRV) + withRev = returnedRV + } + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } + } + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) + default: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. 
+ case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } + } + + options = append(options, clientv3.WithPrefix()) + } + if withRev != 0 { + options = append(options, clientv3.WithRev(withRev)) + } + + // loop until we have filled the requested limit from etcd or there are no more results + var lastKey []byte + var hasMore bool + var getResp *clientv3.GetResponse + for { + startTime := time.Now() + getResp, err = s.client.KV.Get(ctx, key, options...) + metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime) + if err != nil { + return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) + } + if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + return err + } + hasMore = getResp.More + + if len(getResp.Kvs) == 0 && getResp.More { + return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") + } + + // avoid small allocations for the result slice, since this can be called in many + // different contexts and we don't know how significantly the result will be filtered + if pred.Empty() { + growSlice(v, len(getResp.Kvs)) + } else { + growSlice(v, 2048, len(getResp.Kvs)) + } + + // take items from the response until the bucket is full, filtering as we go + for _, kv := range getResp.Kvs { + if paging && int64(v.Len()) >= pred.Limit { + hasMore = true + break + } + lastKey = kv.Key + + data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key)) + if err != nil { + return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) + } + + if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { + return err + } + } + + // indicate to the client which resource version was returned + if returnedRV == 0 { + returnedRV = getResp.Header.Revision + } + + // no more results remain or we didn't request paging + if !hasMore || !paging { + break + } + // we're paging but we have filled our bucket + if int64(v.Len()) >= pred.Limit { + break + } + key = string(lastKey) + "\x00" + if withRev == 0 { + withRev = returnedRV + options = append(options, clientv3.WithRev(withRev)) + } + } + + // instruct the client to begin querying from immediately after the last key we returned + // we never return a key that the client wouldn't be allowed to see + if hasMore { + // we want to start immediately after the last key + next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV) + if err != nil { + return err + } + var remainingItemCount *int64 + // getResp.Count counts in objects that do not match the pred. + // Instead of returning inaccurate count for non-empty selectors, we return nil. + // Only set remainingItemCount if the predicate is empty. + if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) { + if pred.Empty() { + c := int64(getResp.Count - pred.Limit) + remainingItemCount = &c + } + } + return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount) + } + + // no continuation + return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil) +} + +// growSlice takes a slice value and grows its capacity up +// to the maximum of the passed sizes or maxCapacity, whichever +// is smaller. Above maxCapacity decisions about allocation are left +// to the Go runtime on append. 
This allows a caller to make an +// educated guess about the potential size of the total list while +// still avoiding overly aggressive initial allocation. If sizes +// is empty maxCapacity will be used as the size to grow. +func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { + cap := v.Cap() + max := cap + for _, size := range sizes { + if size > max { + max = size + } + } + if len(sizes) == 0 || max > maxCapacity { + max = maxCapacity + } + if max <= cap { + return + } + if v.Len() > 0 { + extra := reflect.MakeSlice(v.Type(), 0, max) + reflect.Copy(extra, v) + v.Set(extra) + } else { + extra := reflect.MakeSlice(v.Type(), 0, max) + v.Set(extra) + } +} + +// Watch implements storage.Interface.Watch. +func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.watch(ctx, key, opts, false) +} + +// WatchList implements storage.Interface.WatchList. +func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.watch(ctx, key, opts, true) +} + +func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) { + rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return nil, err + } + key = path.Join(s.pathPrefix, key) + return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate) +} + +func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { + state := &objState{ + meta: &storage.ResponseMeta{}, + } + + if u, ok := v.Addr().Interface().(runtime.Unstructured); ok { + state.obj = u.NewEmptyInstance() + } else { + state.obj = reflect.New(v.Type()).Interface().(runtime.Object) + } + + if len(getResp.Kvs) == 0 { + if !ignoreNotFound { + return nil, storage.NewKeyNotFoundError(key, 0) + } + if err := runtime.SetZeroValue(state.obj); err != nil { + return nil, err + } + } else { + data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) + if err != nil { + return nil, storage.NewInternalError(err.Error()) + } + state.rev = getResp.Kvs[0].ModRevision + state.meta.ResourceVersion = uint64(state.rev) + state.data = data + state.stale = stale + if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil { + return nil, err + } + } + return state, nil +} + +func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) { + state := &objState{ + obj: obj, + meta: &storage.ResponseMeta{}, + } + + rv, err := s.versioner.ObjectResourceVersion(obj) + if err != nil { + return nil, fmt.Errorf("couldn't get resource version: %v", err) + } + state.rev = int64(rv) + state.meta.ResourceVersion = uint64(state.rev) + + // Compute the serialized form - for that we need to temporarily clean + // its resource version field (those are not stored in etcd). 
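+	// The original resource version is written back onto state.obj by the UpdateObject
+	// call below, so callers still observe the object's version.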
+ if err := s.versioner.PrepareObjectForStorage(obj); err != nil { + return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + state.data, err = runtime.Encode(s.codec, obj) + if err != nil { + return nil, err + } + if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + return state, nil +} + +func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) { + ret, ttlPtr, err := userUpdate(st.obj, *st.meta) + if err != nil { + return nil, 0, err + } + + if err := s.versioner.PrepareObjectForStorage(ret); err != nil { + return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err) + } + var ttl uint64 + if ttlPtr != nil { + ttl = *ttlPtr + } + return ret, ttl, nil +} + +// ttlOpts returns client options based on given ttl. +// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length +func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) { + if ttl == 0 { + return nil, nil + } + id, err := s.leaseManager.GetLease(ctx, ttl) + if err != nil { + return nil, err + } + return []clientv3.OpOption{clientv3.WithLease(id)}, nil +} + +// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is +// greater than the most recent actualRevision available from storage. +func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error { + if minimumResourceVersion == "" { + return nil + } + minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + // Enforce the storage.Interface guarantee that the resource version of the returned data + // "will be at least 'resourceVersion'". + if minimumRV > actualRevision { + return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0) + } + return nil +} + +// decode decodes value of bytes into object. It will also set the object resource version to rev. +// On success, objPtr would be set to the object. +func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error { + if _, err := conversion.EnforcePtr(objPtr); err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + _, _, err := codec.Decode(value, nil, objPtr) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + return nil +} + +// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice. 
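+// Decode failures abort the operation, while objects that do not match pred (or whose
+// predicate evaluation returns an error) are skipped without surfacing an error.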
+func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error { + obj, _, err := codec.Decode(data, nil, newItemFunc()) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + if err := versioner.UpdateObject(obj, rev); err != nil { + klog.Errorf("failed to update object version: %v", err) + } + if matched, err := pred.Matches(obj); err == nil && matched { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + } + return nil +} + +func notFound(key string) clientv3.Cmp { + return clientv3.Compare(clientv3.ModRevision(key), "=", 0) +} + +// getTypeName returns type name of an object for reporting purposes. +func getTypeName(obj interface{}) string { + return reflect.TypeOf(obj).String() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go new file mode 100644 index 000000000..bd87382e8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -0,0 +1,468 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "context" + "errors" + "fmt" + "os" + "reflect" + "strconv" + "strings" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/value" + + "go.etcd.io/etcd/clientv3" + "k8s.io/klog/v2" +) + +const ( + // We have set a buffer in order to reduce times of context switches. + incomingBufSize = 100 + outgoingBufSize = 100 +) + +// fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error +var fatalOnDecodeError = false + +// errTestingDecode is the only error that testingDeferOnDecodeError catches during a panic +var errTestingDecode = errors.New("sentinel error only used during testing to indicate watch decoding error") + +// testingDeferOnDecodeError is used during testing to recover from a panic caused by errTestingDecode, all other values continue to panic +func testingDeferOnDecodeError() { + if r := recover(); r != nil && r != errTestingDecode { + panic(r) + } +} + +func init() { + // check to see if we are running in a test environment + TestOnlySetFatalOnDecodeError(true) + fatalOnDecodeError, _ = strconv.ParseBool(os.Getenv("KUBE_PANIC_WATCH_DECODE_ERROR")) +} + +// TestOnlySetFatalOnDecodeError should only be used for cases where decode errors are expected and need to be tested. e.g. conversion webhooks. +func TestOnlySetFatalOnDecodeError(b bool) { + fatalOnDecodeError = b +} + +type watcher struct { + client *clientv3.Client + codec runtime.Codec + newFunc func() runtime.Object + objectType string + versioner storage.Versioner + transformer value.Transformer +} + +// watchChan implements watch.Interface. 
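+// Events flow from etcd into incomingEventChan (fed by startWatching via sendEvent),
+// are filtered and converted to watch.Events by processEvent, and are delivered to
+// the consumer on resultChan.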
+type watchChan struct { + watcher *watcher + key string + initialRev int64 + recursive bool + progressNotify bool + internalPred storage.SelectionPredicate + ctx context.Context + cancel context.CancelFunc + incomingEventChan chan *event + resultChan chan watch.Event + errChan chan error +} + +func newWatcher(client *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, versioner storage.Versioner, transformer value.Transformer) *watcher { + res := &watcher{ + client: client, + codec: codec, + newFunc: newFunc, + versioner: versioner, + transformer: transformer, + } + if newFunc == nil { + res.objectType = "" + } else { + res.objectType = reflect.TypeOf(newFunc()).String() + } + return res +} + +// Watch watches on a key and returns a watch.Interface that transfers relevant notifications. +// If rev is zero, it will return the existing object(s) and then start watching from +// the maximum revision+1 from returned objects. +// If rev is non-zero, it will watch events happened after given revision. +// If recursive is false, it watches on given key. +// If recursive is true, it watches any children and directories under the key, excluding the root key itself. +// pred must be non-nil. Only if pred matches the change, it will be returned. +func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) (watch.Interface, error) { + if recursive && !strings.HasSuffix(key, "/") { + key += "/" + } + wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, pred) + go wc.run() + return wc, nil +} + +func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) *watchChan { + wc := &watchChan{ + watcher: w, + key: key, + initialRev: rev, + recursive: recursive, + progressNotify: progressNotify, + internalPred: pred, + incomingEventChan: make(chan *event, incomingBufSize), + resultChan: make(chan watch.Event, outgoingBufSize), + errChan: make(chan error, 1), + } + if pred.Empty() { + // The filter doesn't filter out any object. + wc.internalPred = storage.Everything + } + + // The etcd server waits until it cannot find a leader for 3 election + // timeouts to cancel existing streams. 3 is currently a hard coded + // constant. The election timeout defaults to 1000ms. If the cluster is + // healthy, when the leader is stopped, the leadership transfer should be + // smooth. (leader transfers its leadership before stopping). If leader is + // hard killed, other servers will take an election timeout to realize + // leader lost and start campaign. + wc.ctx, wc.cancel = context.WithCancel(clientv3.WithRequireLeader(ctx)) + return wc +} + +func (wc *watchChan) run() { + watchClosedCh := make(chan struct{}) + go wc.startWatching(watchClosedCh) + + var resultChanWG sync.WaitGroup + resultChanWG.Add(1) + go wc.processEvent(&resultChanWG) + + select { + case err := <-wc.errChan: + if err == context.Canceled { + break + } + errResult := transformErrorToEvent(err) + if errResult != nil { + // error result is guaranteed to be received by user before closing ResultChan. + select { + case wc.resultChan <- *errResult: + case <-wc.ctx.Done(): // user has given up all results + } + } + case <-watchClosedCh: + case <-wc.ctx.Done(): // user cancel + } + + // We use wc.ctx to reap all goroutines. Under whatever condition, we should stop them all. + // It's fine to double cancel. 
+ wc.cancel() + + // we need to wait until resultChan wouldn't be used anymore + resultChanWG.Wait() + close(wc.resultChan) +} + +func (wc *watchChan) Stop() { + wc.cancel() +} + +func (wc *watchChan) ResultChan() <-chan watch.Event { + return wc.resultChan +} + +// sync tries to retrieve existing data and send them to process. +// The revision to watch will be set to the revision in response. +// All events sent will have isCreated=true +func (wc *watchChan) sync() error { + opts := []clientv3.OpOption{} + if wc.recursive { + opts = append(opts, clientv3.WithPrefix()) + } + getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...) + if err != nil { + return err + } + wc.initialRev = getResp.Header.Revision + for _, kv := range getResp.Kvs { + wc.sendEvent(parseKV(kv)) + } + return nil +} + +// logWatchChannelErr checks whether the error is about mvcc revision compaction which is regarded as warning +func logWatchChannelErr(err error) { + if !strings.Contains(err.Error(), "mvcc: required revision has been compacted") { + klog.Errorf("watch chan error: %v", err) + } else { + klog.Warningf("watch chan error: %v", err) + } +} + +// startWatching does: +// - get current objects if initialRev=0; set initialRev to current rev +// - watch on given key and send events to process. +func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { + if wc.initialRev == 0 { + if err := wc.sync(); err != nil { + klog.Errorf("failed to sync with latest state: %v", err) + wc.sendError(err) + return + } + } + opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} + if wc.recursive { + opts = append(opts, clientv3.WithPrefix()) + } + if wc.progressNotify { + opts = append(opts, clientv3.WithProgressNotify()) + } + wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...) + for wres := range wch { + if wres.Err() != nil { + err := wres.Err() + // If there is an error on server (e.g. compaction), the channel will return it before closed. + logWatchChannelErr(err) + wc.sendError(err) + return + } + if wres.IsProgressNotify() { + wc.sendEvent(progressNotifyEvent(wres.Header.GetRevision())) + metrics.RecordEtcdBookmark(wc.watcher.objectType) + continue + } + + for _, e := range wres.Events { + parsedEvent, err := parseEvent(e) + if err != nil { + logWatchChannelErr(err) + wc.sendError(err) + return + } + wc.sendEvent(parsedEvent) + } + } + // When we come to this point, it's only possible that client side ends the watch. + // e.g. cancel the context, close the client. + // If this watch chan is broken and context isn't cancelled, other goroutines will still hang. + // We should notify the main thread that this goroutine has exited. + close(watchClosedCh) +} + +// processEvent processes events from etcd watcher and sends results to resultChan. +func (wc *watchChan) processEvent(wg *sync.WaitGroup) { + defer wg.Done() + + for { + select { + case e := <-wc.incomingEventChan: + res := wc.transform(e) + if res == nil { + continue + } + if len(wc.resultChan) == outgoingBufSize { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize) + } + // If user couldn't receive results fast enough, we also block incoming events from watcher. + // Because storing events in local will cause more memory usage. + // The worst case would be closing the fast watcher. 
+ select { + case wc.resultChan <- *res: + case <-wc.ctx.Done(): + return + } + case <-wc.ctx.Done(): + return + } + } +} + +func (wc *watchChan) filter(obj runtime.Object) bool { + if wc.internalPred.Empty() { + return true + } + matched, err := wc.internalPred.Matches(obj) + return err == nil && matched +} + +func (wc *watchChan) acceptAll() bool { + return wc.internalPred.Empty() +} + +// transform transforms an event into a result for user if not filtered. +func (wc *watchChan) transform(e *event) (res *watch.Event) { + curObj, oldObj, err := wc.prepareObjs(e) + if err != nil { + klog.Errorf("failed to prepare current and previous objects: %v", err) + wc.sendError(err) + return nil + } + + switch { + case e.isProgressNotify: + if wc.watcher.newFunc == nil { + return nil + } + object := wc.watcher.newFunc() + if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil { + klog.Errorf("failed to propagate object version: %v", err) + return nil + } + res = &watch.Event{ + Type: watch.Bookmark, + Object: object, + } + case e.isDeleted: + if !wc.filter(oldObj) { + return nil + } + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + case e.isCreated: + if !wc.filter(curObj) { + return nil + } + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + default: + if wc.acceptAll() { + res = &watch.Event{ + Type: watch.Modified, + Object: curObj, + } + return res + } + curObjPasses := wc.filter(curObj) + oldObjPasses := wc.filter(oldObj) + switch { + case curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Modified, + Object: curObj, + } + case curObjPasses && !oldObjPasses: + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + case !curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + } + } + return res +} + +func transformErrorToEvent(err error) *watch.Event { + err = interpretWatchError(err) + if _, ok := err.(apierrors.APIStatus); !ok { + err = apierrors.NewInternalError(err) + } + status := err.(apierrors.APIStatus).Status() + return &watch.Event{ + Type: watch.Error, + Object: &status, + } +} + +func (wc *watchChan) sendError(err error) { + select { + case wc.errChan <- err: + case <-wc.ctx.Done(): + } +} + +func (wc *watchChan) sendEvent(e *event) { + if len(wc.incomingEventChan) == incomingBufSize { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow decoding, user not receiving fast, or other processing logic", "incomingEvents", incomingBufSize) + } + select { + case wc.incomingEventChan <- e: + case <-wc.ctx.Done(): + } +} + +func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtime.Object, err error) { + if e.isProgressNotify { + // progressNotify events doesn't contain neither current nor previous object version, + return nil, nil, nil + } + + if !e.isDeleted { + data, _, err := wc.watcher.transformer.TransformFromStorage(e.value, authenticatedDataString(e.key)) + if err != nil { + return nil, nil, err + } + curObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) + if err != nil { + return nil, nil, err + } + } + // We need to decode prevValue, only if this is deletion event or + // the underlying filter doesn't accept all objects (otherwise we + // know that the filter for previous object will return true and + // we need the object only to compute whether it was filtered out + // before). 
+ if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { + data, _, err := wc.watcher.transformer.TransformFromStorage(e.prevValue, authenticatedDataString(e.key)) + if err != nil { + return nil, nil, err + } + // Note that this sends the *old* object with the etcd revision for the time at + // which it gets deleted. + oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) + if err != nil { + return nil, nil, err + } + } + return curObj, oldObj, nil +} + +func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (_ runtime.Object, err error) { + obj, err := runtime.Decode(codec, []byte(data)) + if err != nil { + if fatalOnDecodeError { + // catch watch decode error iff we caused it on + // purpose during a unit test + defer testingDeferOnDecodeError() + // we are running in a test environment and thus an + // error here is due to a coder mistake if the defer + // does not catch it + panic(err) + } + return nil, err + } + // ensure resource version is set on the object we load from etcd + if err := versioner.UpdateObject(obj, uint64(rev)); err != nil { + return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err) + } + return obj, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go new file mode 100644 index 000000000..01f9132f5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -0,0 +1,275 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Versioner abstracts setting and retrieving metadata fields from database response +// onto the object ot list. It is required to maintain storage invariants - updating an +// object twice with the same data except for the ResourceVersion and SelfLink must be +// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion, +// intended to be sent directly to or from the backend. A resourceVersion of +// type string is a 'safe' resourceVersion, intended for consumption by users. +type Versioner interface { + // UpdateObject sets storage metadata into an API object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateObject(obj runtime.Object, resourceVersion uint64) error + // UpdateList sets the resource version into an API list object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata from + // database. continueValue is optional and indicates that more results are available if the client + // passes that value to the server in a subsequent call. 
remainingItemCount indicates the number
+	// of remaining objects if the list is partial. The remainingItemCount field is omitted during
+	// serialization if it is set to nil.
+	UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
+	// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
+	// return an error if the specified object cannot be updated.
+	PrepareObjectForStorage(obj runtime.Object) error
+	// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
+	// Should return an error if the specified object does not have a persistable version.
+	ObjectResourceVersion(obj runtime.Object) (uint64, error)
+
+	// ParseResourceVersion takes a resource version argument and
+	// converts it to the storage backend. For watch we should pass to helper.Watch().
+	// Because resourceVersion is an opaque value, the default watch
+	// behavior for non-zero watch is to watch the next value (if you pass
+	// "1", you will see updates from "2" onwards).
+	ParseResourceVersion(resourceVersion string) (uint64, error)
+}
+
+// ResponseMeta contains information about the database metadata that is associated with
+// an object. It abstracts the actual underlying objects to prevent coupling with concrete
+// database and to improve testability.
+type ResponseMeta struct {
+	// TTL is the time to live of the node that contained the returned object. It may be
+	// zero or negative in some cases (objects may be expired after the requested
+	// expiration time due to server lag).
+	TTL int64
+	// The resource version of the node that contained the returned object.
+	ResourceVersion uint64
+}
+
+// IndexerFunc is a function that for a given object computes
+// the value of an index for a particular index name.
+type IndexerFunc func(obj runtime.Object) string
+
+// IndexerFuncs is a mapping from index name to a function that
+// for a given object computes the value for that index.
+type IndexerFuncs map[string]IndexerFunc
+
+// Everything accepts all objects.
+var Everything = SelectionPredicate{
+	Label: labels.Everything(),
+	Field: fields.Everything(),
+}
+
+// MatchValue defines a pair (index name, value for that index).
+type MatchValue struct {
+	IndexName string
+	Value     string
+}
+
+// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
+// that is guaranteed to succeed.
+// See the comment for GuaranteedUpdate for more details.
+type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
+
+// ValidateObjectFunc is a function to act on a given object. An error may be returned
+// if the hook cannot be completed. The function may NOT transform the provided
+// object.
+type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
+
+// ValidateAllObjectFunc is an "admit everything" instance of ValidateObjectFunc.
+func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
+	return nil
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty"`
+	// Specifies the target ResourceVersion
+	// +optional
+	ResourceVersion *string `json:"resourceVersion,omitempty"`
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
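+// Checking such preconditions against an object whose UID differs from uid yields an
+// invalid object error (see Check below).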
+func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +func (p *Preconditions) Check(key string, obj runtime.Object) error { + if p == nil { + return nil + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return NewInternalErrorf( + "can't enforce preconditions %v on un-introspectable object %v, got error: %v", + *p, + obj, + err) + } + if p.UID != nil && *p.UID != objMeta.GetUID() { + err := fmt.Sprintf( + "Precondition failed: UID in precondition: %v, UID in object meta: %v", + *p.UID, + objMeta.GetUID()) + return NewInvalidObjError(key, err) + } + if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() { + err := fmt.Sprintf( + "Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v", + *p.ResourceVersion, + objMeta.GetResourceVersion()) + return NewInvalidObjError(key, err) + } + return nil +} + +// Interface offers a common interface for object marshaling/unmarshaling operations and +// hides all the storage-related operations behind it. +type Interface interface { + // Returns Versioner associated with this interface. + Versioner() Versioner + + // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live + // in seconds (0 means forever). If no error is returned and out is not nil, out will be + // set to the read value from database. + Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error + + // Delete removes the specified key and returns the value that existed at that spot. + // If key didn't exist, it will return NotFound storage error. + Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions, validateDeletion ValidateObjectFunc) error + + // Watch begins watching the specified key. Events are decoded into API objects, + // and any items selected by 'p' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + // If resource version is "0", this interface will get current object at given key + // and send it in an "ADDED" event, before watch starts. + Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error) + + // WatchList begins watching the specified key's items. Items are decoded into API + // objects and any item selected by 'p' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + // If resource version is "0", this interface will list current objects directory defined by key + // and send them in "ADDED" events, before watch starts. + WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error) + + // Get unmarshals json found at key into objPtr. On a not found error, will either + // return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'. + // Treats empty responses and nil response nodes exactly like a not found error. + // The returned contents may be delayed, but it is guaranteed that they will + // match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'. 
+	Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error
+
+	// GetToList unmarshals the json found at key and stores it as an item of the *List api object
+	// (an object that satisfies the runtime.IsList definition).
+	// The returned contents may be delayed, but it is guaranteed that they will
+	// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
+	GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
+
+	// List unmarshals the jsons found under the directory defined by key and stores them
+	// as items of the *List api object (an object that satisfies runtime.IsList definition).
+	// The returned contents may be delayed, but it is guaranteed that they will
+	// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
+	List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
+
+	// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
+	// retrying the update until success if there is index conflict.
+	// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
+	// other writers are simultaneously updating it, so tryUpdate() needs to take into account
+	// the current contents of the object when deciding how the update object should look.
+	// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
+	// or zero value in 'ptrToType' parameter otherwise.
+	// If the object to update has the same value as previous, it won't do any update
+	// but will return the object in 'ptrToType' parameter.
+	// If 'suggestion' is non-nil, it can be used as a suggestion about the current version
+	// of the object to avoid read operation from storage to get it. However, the
+	// implementations have to retry in case suggestion is stale.
+	//
+	// Example:
+	//
+	// s := /* implementation of Interface */
+	// err := s.GuaranteedUpdate(
+	//     "myKey", &MyType{}, true,
+	//     func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
+	//       // Before each invocation of the user defined function, "input" is reset to
+	//       // current contents for "myKey" in database.
+	//       curr := input.(*MyType)  // Guaranteed to succeed.
+	//
+	//       // Make the modification
+	//       curr.Counter++
+	//
+	//       // Return the modified object - return an error to stop iterating. Return
+	//       // a uint64 to alter the TTL on the object, or nil to keep it the same value.
+	//       return curr, nil, nil
+	//    },
+	// )
+	GuaranteedUpdate(
+		ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
+		preconditions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error
+
+	// Count returns the number of different entries under the key (generally being a path prefix).
+	Count(key string) (int64, error)
+}
+
+// GetOptions provides the options that may be provided for storage get operations.
+type GetOptions struct {
+	// IgnoreNotFound determines what is returned if the requested object is not found. If
+	// true, a zero object is returned. If false, an error is returned.
+	IgnoreNotFound bool
+	// ResourceVersion provides a resource version constraint to apply to the get operation
+	// as a "not older than" constraint: the result contains data at least as new as the provided
+	// ResourceVersion. The newest available data is preferred, but any data not older than this
+	// ResourceVersion may be served.
+	ResourceVersion string
+}
+
+// ListOptions provides the options that may be provided for storage list operations.
+type ListOptions struct { + // ResourceVersion provides a resource version constraint to apply to the list operation + // as a "not older than" constraint: the result contains data at least as new as the provided + // ResourceVersion. The newest available data is preferred, but any data not older than this + // ResourceVersion may be served. + ResourceVersion string + // ResourceVersionMatch provides the rule for how the resource version constraint applies. If set + // to the default value "" the legacy resource version semantic apply. + ResourceVersionMatch metav1.ResourceVersionMatch + // Predicate provides the selection rules for the list operation. + Predicate SelectionPredicate + // ProgressNotify determines whether storage-originated bookmark (progress notify) events should + // be delivered to the users. The option is ignored for non-watch requests. + ProgressNotify bool +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/generate.go b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go new file mode 100644 index 000000000..aad9a07f9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go new file mode 100644 index 000000000..7370518e3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// AttrFunc returns label and field sets and the uninitialized flag for List or Watch to match. +// In any failure to parse given object, it returns error. +type AttrFunc func(obj runtime.Object) (labels.Set, fields.Set, error) + +// FieldMutationFunc allows the mutation of the field selection fields. It is mutating to +// avoid the extra allocation on this common path +type FieldMutationFunc func(obj runtime.Object, fieldSet fields.Set) error + +func DefaultClusterScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + fieldSet := fields.Set{ + "metadata.name": metadata.GetName(), + } + + return labels.Set(metadata.GetLabels()), fieldSet, nil +} + +func DefaultNamespaceScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + fieldSet := fields.Set{ + "metadata.name": metadata.GetName(), + "metadata.namespace": metadata.GetNamespace(), + } + + return labels.Set(metadata.GetLabels()), fieldSet, nil +} + +func (f AttrFunc) WithFieldMutation(fieldMutator FieldMutationFunc) AttrFunc { + return func(obj runtime.Object) (labels.Set, fields.Set, error) { + labelSet, fieldSet, err := f(obj) + if err != nil { + return nil, nil, err + } + if err := fieldMutator(obj, fieldSet); err != nil { + return nil, nil, err + } + return labelSet, fieldSet, nil + } +} + +// SelectionPredicate is used to represent the way to select objects from api storage. +type SelectionPredicate struct { + Label labels.Selector + Field fields.Selector + GetAttrs AttrFunc + IndexLabels []string + IndexFields []string + Limit int64 + Continue string + AllowWatchBookmarks bool +} + +// Matches returns true if the given object's labels and fields (as +// returned by s.GetAttrs) match s.Label and s.Field. An error is +// returned if s.GetAttrs fails. +func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { + if s.Empty() { + return true, nil + } + labels, fields, err := s.GetAttrs(obj) + if err != nil { + return false, err + } + matched := s.Label.Matches(labels) + if matched && s.Field != nil { + matched = matched && s.Field.Matches(fields) + } + return matched, nil +} + +// MatchesObjectAttributes returns true if the given labels and fields +// match s.Label and s.Field. +func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set) bool { + if s.Label.Empty() && s.Field.Empty() { + return true + } + matched := s.Label.Matches(l) + if matched && s.Field != nil { + matched = (matched && s.Field.Matches(f)) + } + return matched +} + +// MatchesSingle will return (name, true) if and only if s.Field matches on the object's +// name. 
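+// For example (illustrative), a field selector that pins metadata.name=foo yields
+// ("foo", true) when no continue token is set; otherwise ("", false) is returned.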
+func (s *SelectionPredicate) MatchesSingle() (string, bool) {
+	if len(s.Continue) > 0 {
+		return "", false
+	}
+	// TODO: should be namespace.name
+	if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok {
+		return name, true
+	}
+	return "", false
+}
+
+// Empty returns true if the predicate performs no filtering.
+func (s *SelectionPredicate) Empty() bool {
+	return s.Label.Empty() && s.Field.Empty()
+}
+
+// For any index defined by IndexFields, if a matcher can match only (a subset)
+// of objects that return a given value for that index, a pair (index name, value)
+// will be returned.
+func (s *SelectionPredicate) MatcherIndex() []MatchValue {
+	var result []MatchValue
+	for _, field := range s.IndexFields {
+		if value, ok := s.Field.RequiresExactMatch(field); ok {
+			result = append(result, MatchValue{IndexName: FieldIndex(field), Value: value})
+		}
+	}
+	for _, label := range s.IndexLabels {
+		if value, ok := s.Label.RequiresExactMatch(label); ok {
+			result = append(result, MatchValue{IndexName: LabelIndex(label), Value: value})
+		}
+	}
+	return result
+}
+
+// LabelIndex adds a prefix for the label index.
+func LabelIndex(label string) string {
+	return "l:" + label
+}
+
+// FieldIndex adds a prefix for the field index.
+func FieldIndex(field string) string {
+	return "f:" + field
+}
diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
new file mode 100644
index 000000000..dbef228c1
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- lavalamp
+- smarterclayton
+- wojtek-t
+- timothysc
+- hongchaodeng
diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
new file mode 100644
index 000000000..af94efcea
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagebackend
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/server/egressselector"
+	"k8s.io/apiserver/pkg/storage/value"
+)
+
+const (
+	StorageTypeUnset = ""
+	StorageTypeETCD3 = "etcd3"
+
+	DefaultCompactInterval      = 5 * time.Minute
+	DefaultDBMetricPollInterval = 30 * time.Second
+	DefaultHealthcheckTimeout   = 2 * time.Second
+)
+
+// TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
+type TransportConfig struct {
+	// ServerList is the list of storage servers to connect with.
+	ServerList []string
+	// TLS credentials
+	KeyFile       string
+	CertFile      string
+	TrustedCAFile string
+	// function to determine the egress dialer. (i.e. konnectivity server dialer)
+	EgressLookup egressselector.Lookup
+}
+
+// Config is configuration for creating a storage backend.
+type Config struct {
+	// Type defines the type of storage backend. Default ("") is "etcd3".
+ Type string + // Prefix is the prefix to all keys passed to storage.Interface methods. + Prefix string + // Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. + Transport TransportConfig + // Paging indicates whether the server implementation should allow paging (if it is + // supported). This is generally configured by feature gating, or by a specific + // resource type not wishing to allow paging, and is not intended for end users to + // set. + Paging bool + + Codec runtime.Codec + // EncodeVersioner is the same groupVersioner used to build the + // storage encoder. Given a list of kinds the input object might belong + // to, the EncodeVersioner outputs the gvk the object will be + // converted to before persisted in etcd. + EncodeVersioner runtime.GroupVersioner + // Transformer allows the value to be transformed prior to persisting into etcd. + Transformer value.Transformer + + // CompactionInterval is an interval of requesting compaction from apiserver. + // If the value is 0, no compaction will be issued. + CompactionInterval time.Duration + // CountMetricPollPeriod specifies how often should count metric be updated + CountMetricPollPeriod time.Duration + // DBMetricPollInterval specifies how often should storage backend metric be updated. + DBMetricPollInterval time.Duration + // HealthcheckTimeout specifies the timeout used when checking health + HealthcheckTimeout time.Duration +} + +func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { + return &Config{ + Paging: true, + Prefix: prefix, + Codec: codec, + CompactionInterval: DefaultCompactInterval, + DBMetricPollInterval: DefaultDBMetricPollInterval, + HealthcheckTimeout: DefaultHealthcheckTimeout, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go new file mode 100644 index 000000000..9a1618df8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -0,0 +1,291 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "context" + "fmt" + "net" + "net/url" + "path" + "sync" + "sync/atomic" + "time" + + grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/transport" + "google.golang.org/grpc" + + "k8s.io/apimachinery/pkg/runtime" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +const ( + // The short keepalive timeout and interval have been chosen to aggressively + // detect a failed etcd server without introducing much overhead. 
+ keepaliveTime = 30 * time.Second + keepaliveTimeout = 10 * time.Second + + // dialTimeout is the timeout for failing to establish a connection. + // It is set to 20 seconds as times shorter than that will cause TLS connections to fail + // on heavily loaded arm64 CPUs (issue #64649) + dialTimeout = 20 * time.Second + + dbMetricsMonitorJitter = 0.5 +) + +func init() { + // grpcprom auto-registers (via an init function) their client metrics, since we are opting out of + // using the global prometheus registry and using our own wrapped global registry, + // we need to explicitly register these metrics to our global registry here. + // For reference: https://github.com/kubernetes/kubernetes/pull/81387 + legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics) + dbMetricsMonitors = make(map[string]struct{}) +} + +func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) { + // constructing the etcd v3 client blocks and times out if etcd is not available. + // retry in a loop in the background until we successfully create the client, storing the client or error encountered + + clientValue := &atomic.Value{} + + clientErrMsg := &atomic.Value{} + clientErrMsg.Store("etcd client connection not yet established") + + go wait.PollUntil(time.Second, func() (bool, error) { + client, err := newETCD3Client(c.Transport) + if err != nil { + clientErrMsg.Store(err.Error()) + return false, nil + } + clientValue.Store(client) + clientErrMsg.Store("") + return true, nil + }, wait.NeverStop) + + return func() error { + if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 { + return fmt.Errorf(errMsg) + } + client := clientValue.Load().(*clientv3.Client) + healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout + if c.HealthcheckTimeout != time.Duration(0) { + healthcheckTimeout = c.HealthcheckTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout) + defer cancel() + // See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118 + _, err := client.Get(ctx, path.Join("/", c.Prefix, "health")) + if err == nil { + return nil + } + return fmt.Errorf("error getting data from etcd: %v", err) + }, nil +} + +func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) { + tlsInfo := transport.TLSInfo{ + CertFile: c.CertFile, + KeyFile: c.KeyFile, + TrustedCAFile: c.TrustedCAFile, + } + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, err + } + // NOTE: Client relies on nil tlsConfig + // for non-secure connections, update the implicit variable + if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 { + tlsConfig = nil + } + networkContext := egressselector.Etcd.AsNetworkContext() + var egressDialer utilnet.DialFunc + if c.EgressLookup != nil { + egressDialer, err = c.EgressLookup(networkContext) + if err != nil { + return nil, err + } + } + dialOptions := []grpc.DialOption{ + grpc.WithBlock(), // block until the underlying connection is up + grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), + } + if egressDialer != nil { + dialer := func(ctx context.Context, addr string) (net.Conn, error) { + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + return egressDialer(ctx, "tcp", u.Host) + } + dialOptions = append(dialOptions, grpc.WithContextDialer(dialer)) + } + cfg := clientv3.Config{ + DialTimeout: dialTimeout, + DialKeepAliveTime: 
keepaliveTime, + DialKeepAliveTimeout: keepaliveTimeout, + DialOptions: dialOptions, + Endpoints: c.ServerList, + TLS: tlsConfig, + } + + return clientv3.New(cfg) +} + +type runningCompactor struct { + interval time.Duration + cancel context.CancelFunc + client *clientv3.Client + refs int +} + +var ( + // compactorsMu guards access to compactors map + compactorsMu sync.Mutex + compactors = map[string]*runningCompactor{} + // dbMetricsMonitorsMu guards access to dbMetricsMonitors map + dbMetricsMonitorsMu sync.Mutex + dbMetricsMonitors map[string]struct{} +) + +// startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the +// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called, +// the compactor is stopped. +func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) { + compactorsMu.Lock() + defer compactorsMu.Unlock() + + key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile} + if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval { + compactorClient, err := newETCD3Client(c) + if err != nil { + return nil, err + } + + if foundBefore { + // replace compactor + compactor.cancel() + compactor.client.Close() + } else { + // start new compactor + compactor = &runningCompactor{} + compactors[key] = compactor + } + + ctx, cancel := context.WithCancel(context.Background()) + + compactor.interval = interval + compactor.cancel = cancel + compactor.client = compactorClient + + etcd3.StartCompactor(ctx, compactorClient, interval) + } + + compactors[key].refs++ + + return func() { + compactorsMu.Lock() + defer compactorsMu.Unlock() + + compactor := compactors[key] + compactor.refs-- + if compactor.refs == 0 { + compactor.cancel() + compactor.client.Close() + delete(compactors, key) + } + }, nil +} + +func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { + stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval) + if err != nil { + return nil, nil, err + } + + client, err := newETCD3Client(c.Transport) + if err != nil { + stopCompactor() + return nil, nil, err + } + + stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval) + if err != nil { + return nil, nil, err + } + + var once sync.Once + destroyFunc := func() { + // we know that storage destroy funcs are called multiple times (due to reuse in subresources). + // Hence, we only destroy once. + // TODO: fix duplicated storage destroy calls higher level + once.Do(func() { + stopCompactor() + stopDBSizeMonitor() + client.Close() + }) + } + transformer := c.Transformer + if transformer == nil { + transformer = value.IdentityTransformer + } + return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging), destroyFunc, nil +} + +// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the +// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint. 
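// Editorial aside, not part of the vendored file: the reference-counting shape
// used by startCompactorOnce above, reduced to a generic start-once/stop-last
// helper. All names here are invented; only the pattern mirrors the vendored code.
package main

import (
	"fmt"
	"sync"
)

type shared struct {
	refs int
	stop func()
}

var (
	mu     sync.Mutex
	active = map[string]*shared{}
)

// acquire starts the resource for key on first use and returns a release func;
// the underlying stop only runs when the last holder releases it.
func acquire(key string, start func() func()) func() {
	mu.Lock()
	defer mu.Unlock()
	s, ok := active[key]
	if !ok {
		s = &shared{stop: start()}
		active[key] = s
	}
	s.refs++
	return func() {
		mu.Lock()
		defer mu.Unlock()
		s.refs--
		if s.refs == 0 {
			s.stop()
			delete(active, key)
		}
	}
}

func main() {
	start := func() func() { fmt.Println("started"); return func() { fmt.Println("stopped") } }
	r1 := acquire("etcd://a", start)
	r2 := acquire("etcd://a", start) // reuses the running instance
	r1()
	r2() // last release actually stops it
}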
+func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) { + if interval == 0 { + return func() {}, nil + } + dbMetricsMonitorsMu.Lock() + defer dbMetricsMonitorsMu.Unlock() + + ctx, cancel := context.WithCancel(context.Background()) + for _, ep := range client.Endpoints() { + if _, found := dbMetricsMonitors[ep]; found { + continue + } + dbMetricsMonitors[ep] = struct{}{} + endpoint := ep + klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval) + go wait.JitterUntilWithContext(ctx, func(context.Context) { + epStatus, err := client.Maintenance.Status(ctx, endpoint) + if err != nil { + klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err) + metrics.UpdateEtcdDbSize(endpoint, -1) + } else { + metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize) + } + }, interval, dbMetricsMonitorJitter, true) + } + + return func() { + cancel() + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go new file mode 100644 index 000000000..1e8a8cdb0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" +) + +// DestroyFunc is to destroy any resources used by the storage returned in Create() together. +type DestroyFunc func() + +// Create creates a storage backend based on given config. +func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { + switch c.Type { + case "etcd2": + return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) + case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: + return newETCD3Storage(c, newFunc) + default: + return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type) + } +} + +// CreateHealthCheck creates a healthcheck function based on given config. +func CreateHealthCheck(c storagebackend.Config) (func() error, error) { + switch c.Type { + case "etcd2": + return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) + case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: + return newETCD3HealthCheck(c) + default: + return nil, fmt.Errorf("unknown storage type: %s", c.Type) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/util.go b/vendor/k8s.io/apiserver/pkg/storage/util.go new file mode 100644 index 000000000..9da8d9713 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/util.go @@ -0,0 +1,81 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
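// Editorial aside, not part of the vendored file: a rough sketch of wiring the
// storagebackend defaults into factory.Create and CreateHealthCheck above. It
// assumes an etcd server is reachable at localhost:2379 and uses the
// unstructured JSON codec purely for illustration; a real apiserver supplies a
// scheme-backed codec and TLS material in TransportConfig.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/storage/storagebackend"
	"k8s.io/apiserver/pkg/storage/storagebackend/factory"
)

func main() {
	cfg := storagebackend.NewDefaultConfig("/registry", unstructured.UnstructuredJSONScheme)
	cfg.Transport.ServerList = []string{"http://localhost:2379"} // assumed endpoint

	newFunc := func() runtime.Object { return &unstructured.Unstructured{} }

	store, destroy, err := factory.Create(*cfg, newFunc)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	defer destroy()
	fmt.Printf("etcd3 storage ready: %T\n", store)

	// The health check is built from the same config and can be plugged into
	// the apiserver's /healthz handlers.
	check, err := factory.CreateHealthCheck(*cfg)
	if err == nil {
		fmt.Println("healthy:", check() == nil)
	}
}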
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/runtime" +) + +type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc +func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} + +func EverythingFunc(runtime.Object) bool { + return true +} + +func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + meta.GetNamespace() + "/" + name, nil +} + +func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + name, nil +} + +// HighWaterMark is a thread-safe object for tracking the maximum value seen +// for some quantity. +type HighWaterMark int64 + +// Update returns true if and only if 'current' is the highest value ever seen. +func (hwm *HighWaterMark) Update(current int64) bool { + for { + old := atomic.LoadInt64((*int64)(hwm)) + if current <= old { + return false + } + if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) { + return true + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go new file mode 100644 index 000000000..daa82f711 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go @@ -0,0 +1,152 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package aes transforms values for storage at rest using AES-GCM. +package aes + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "fmt" + "io" + + "k8s.io/apiserver/pkg/storage/value" +) + +// gcm implements AEAD encryption of the provided values given a cipher.Block algorithm. +// The authenticated data provided as part of the value.Context method must match when the same +// value is set to and loaded from storage. 
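// Editorial aside, not part of the vendored file: a tiny illustration of the
// key functions and the lock-free HighWaterMark defined in util.go above.
// Update only reports true when a new maximum is observed, which makes it
// handy for "largest value seen" style metrics. Object name and prefix are invented.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apiserver/pkg/storage"
)

func main() {
	obj := &unstructured.Unstructured{}
	obj.SetName("web-1")
	obj.SetNamespace("default")

	key, _ := storage.NamespaceKeyFunc("/registry/pods", obj)
	fmt.Println(key) // /registry/pods/default/web-1

	var hwm storage.HighWaterMark
	for _, v := range []int64{3, 10, 7, 10, 12} {
		fmt.Println(v, hwm.Update(v)) // true for 3, 10 and 12; false otherwise
	}
}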
In order to ensure that values cannot be copied by +// an attacker from a location under their control, use characteristics of the storage location +// (such as the etcd key) as part of the authenticated data. +// +// Because this mode requires a generated IV and IV reuse is a known weakness of AES-GCM, keys +// must be rotated before a birthday attack becomes feasible. NIST SP 800-38D +// (http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf) recommends using the same +// key with random 96-bit nonces (the default nonce length) no more than 2^32 times, and +// therefore transformers using this implementation *must* ensure they allow for frequent key +// rotation. Future work should include investigation of AES-GCM-SIV as an alternative to +// random nonces. +type gcm struct { + block cipher.Block +} + +// NewGCMTransformer takes the given block cipher and performs encryption and decryption on the given +// data. +func NewGCMTransformer(block cipher.Block) value.Transformer { + return &gcm{block: block} +} + +func (t *gcm) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, false, err + } + nonceSize := aead.NonceSize() + if len(data) < nonceSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + result, err := aead.Open(nil, data[:nonceSize], data[nonceSize:], context.AuthenticatedData()) + return result, false, err +} + +func (t *gcm) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, err + } + nonceSize := aead.NonceSize() + result := make([]byte, nonceSize+aead.Overhead()+len(data)) + n, err := rand.Read(result[:nonceSize]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + cipherText := aead.Seal(result[nonceSize:nonceSize], result[:nonceSize], data, context.AuthenticatedData()) + return result[:nonceSize+len(cipherText)], nil +} + +// cbc implements encryption at rest of the provided values given a cipher.Block algorithm. +type cbc struct { + block cipher.Block +} + +// NewCBCTransformer takes the given block cipher and performs encryption and decryption on the given +// data. 
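// Editorial aside, not part of the vendored file: a minimal round trip through
// the GCM transformer defined above. The 32-byte key and the etcd-key-like
// authenticated-data context are invented for illustration; real callers derive
// both from the encryption-at-rest configuration.
package main

import (
	"crypto/aes"
	"fmt"

	"k8s.io/apiserver/pkg/storage/value"
	aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes => AES-256
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	t := aestransformer.NewGCMTransformer(block)

	// Using the etcd key as authenticated data ties the ciphertext to its location.
	ctx := value.DefaultContext([]byte("/registry/secrets/default/demo"))

	sealed, err := t.TransformToStorage([]byte(`{"secret":"demo"}`), ctx)
	if err != nil {
		panic(err)
	}
	plain, _, err := t.TransformFromStorage(sealed, ctx)
	fmt.Println(string(plain), err) // {"secret":"demo"} <nil>
}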
+func NewCBCTransformer(block cipher.Block) value.Transformer { + return &cbc{block: block} +} + +var ( + errInvalidBlockSize = fmt.Errorf("the stored data is not a multiple of the block size") + errInvalidPKCS7Data = errors.New("invalid PKCS7 data (empty or not padded)") + errInvalidPKCS7Padding = errors.New("invalid padding on input") +) + +func (t *cbc) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + blockSize := aes.BlockSize + if len(data) < blockSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + iv := data[:blockSize] + data = data[blockSize:] + + if len(data)%blockSize != 0 { + return nil, false, errInvalidBlockSize + } + + result := make([]byte, len(data)) + copy(result, data) + mode := cipher.NewCBCDecrypter(t.block, iv) + mode.CryptBlocks(result, result) + + // remove and verify PKCS#7 padding for CBC + c := result[len(result)-1] + paddingSize := int(c) + size := len(result) - paddingSize + if paddingSize == 0 || paddingSize > len(result) { + return nil, false, errInvalidPKCS7Data + } + for i := 0; i < paddingSize; i++ { + if result[size+i] != c { + return nil, false, errInvalidPKCS7Padding + } + } + + return result[:size], false, nil +} + +func (t *cbc) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + blockSize := aes.BlockSize + paddingSize := blockSize - (len(data) % blockSize) + result := make([]byte, blockSize+len(data)+paddingSize) + iv := result[:blockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + copy(result[blockSize:], data) + + // add PKCS#7 padding for CBC + copy(result[blockSize+len(data):], bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)) + + mode := cipher.NewCBCEncrypter(t.block, iv) + mode.CryptBlocks(result[blockSize:], result[blockSize:]) + return result, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go new file mode 100644 index 000000000..20ca3f6c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using a Envelope provider +package envelope + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "k8s.io/apiserver/pkg/storage/value" + + lru "github.com/hashicorp/golang-lru" + "golang.org/x/crypto/cryptobyte" +) + +func init() { + value.RegisterMetrics() + registerMetrics() +} + +// Service allows encrypting and decrypting data using an external Key Management Service. +type Service interface { + // Decrypt a given bytearray to obtain the original data as bytes. + Decrypt(data []byte) ([]byte, error) + // Encrypt bytes to a ciphertext. 
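// Editorial aside, not part of the vendored file: the PKCS#7 rule the CBC code
// above enforces, shown in isolation. Every padding byte must equal the padding
// length, which is how truncated or tampered tails are detected. Helper names
// are invented.
package main

import (
	"bytes"
	"errors"
	"fmt"
)

func pkcs7Pad(data []byte, blockSize int) []byte {
	n := blockSize - len(data)%blockSize
	return append(data, bytes.Repeat([]byte{byte(n)}, n)...)
}

func pkcs7Unpad(data []byte) ([]byte, error) {
	if len(data) == 0 {
		return nil, errors.New("empty input")
	}
	n := int(data[len(data)-1])
	if n == 0 || n > len(data) {
		return nil, errors.New("invalid padding length")
	}
	for _, b := range data[len(data)-n:] {
		if int(b) != n {
			return nil, errors.New("invalid padding byte")
		}
	}
	return data[:len(data)-n], nil
}

func main() {
	padded := pkcs7Pad([]byte("hello"), 16)
	unpadded, err := pkcs7Unpad(padded)
	fmt.Printf("%d bytes padded, %q, %v\n", len(padded), unpadded, err) // 16 bytes padded, "hello", <nil>
}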
+ Encrypt(data []byte) ([]byte, error) +} + +type envelopeTransformer struct { + envelopeService Service + + // transformers is a thread-safe LRU cache which caches decrypted DEKs indexed by their encrypted form. + transformers *lru.Cache + + // baseTransformerFunc creates a new transformer for encrypting the data with the DEK. + baseTransformerFunc func(cipher.Block) value.Transformer + + cacheSize int + cacheEnabled bool +} + +// NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. +// It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to +// the data items they encrypt. A cache (of size cacheSize) is maintained to store the most recently +// used decrypted DEKs in memory. +func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransformerFunc func(cipher.Block) value.Transformer) (value.Transformer, error) { + var ( + cache *lru.Cache + err error + ) + + if cacheSize > 0 { + cache, err = lru.New(cacheSize) + if err != nil { + return nil, err + } + } + return &envelopeTransformer{ + envelopeService: envelopeService, + transformers: cache, + baseTransformerFunc: baseTransformerFunc, + cacheEnabled: cacheSize > 0, + cacheSize: cacheSize, + }, nil +} + +// TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. +func (t *envelopeTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + recordArrival(fromStorageLabel, time.Now()) + + // Read the 16 bit length-of-DEK encoded at the start of the encrypted DEK. 16 bits can + // represent a maximum key length of 65536 bytes. We are using a 256 bit key, whose + // length cannot fit in 8 bits (1 byte). Thus, we use 16 bits (2 bytes) to store the length. + var encKey cryptobyte.String + s := cryptobyte.String(data) + if ok := s.ReadUint16LengthPrefixed(&encKey); !ok { + return nil, false, fmt.Errorf("invalid data encountered by envelope transformer: failed to read uint16 length prefixed data") + } + + encData := []byte(s) + + // Look up the decrypted DEK from cache or Envelope. + transformer := t.getTransformer(encKey) + if transformer == nil { + if t.cacheEnabled { + value.RecordCacheMiss() + } + key, err := t.envelopeService.Decrypt(encKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // record the metric. + return nil, false, err + } + + transformer, err = t.addTransformer(encKey, key) + if err != nil { + return nil, false, err + } + } + + return transformer.TransformFromStorage(encData, context) +} + +// TransformToStorage encrypts data to be written to disk using envelope encryption. +func (t *envelopeTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + recordArrival(toStorageLabel, time.Now()) + newKey, err := generateKey(32) + if err != nil { + return nil, err + } + + encKey, err := t.envelopeService.Encrypt(newKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // record the metric. + return nil, err + } + + transformer, err := t.addTransformer(encKey, newKey) + if err != nil { + return nil, err + } + + result, err := transformer.TransformToStorage(data, context) + if err != nil { + return nil, err + } + // Append the length of the encrypted DEK as the first 2 bytes. 
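// Editorial aside, not part of the vendored file: a standalone sketch of the
// 2-byte length-prefix framing the envelope transformer uses to prepend the
// encrypted DEK to the payload. The byte values are arbitrary placeholders.
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	encDEK := []byte("encrypted-dek-bytes")
	payload := []byte("aes-gcm-ciphertext")

	// Encode: uint16(len(encDEK)) | encDEK | payload
	b := cryptobyte.NewBuilder(nil)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(encDEK)
	})
	b.AddBytes(payload)
	stored, _ := b.Bytes()

	// Decode: read the prefixed DEK back out; the remainder is the payload.
	var gotDEK cryptobyte.String
	s := cryptobyte.String(stored)
	if !s.ReadUint16LengthPrefixed(&gotDEK) {
		panic("malformed length prefix")
	}
	fmt.Println(string(gotDEK), string(s)) // encrypted-dek-bytes aes-gcm-ciphertext
}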
+ b := cryptobyte.NewBuilder(nil) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(encKey)) + }) + b.AddBytes(result) + + return b.Bytes() +} + +var _ value.Transformer = &envelopeTransformer{} + +// addTransformer inserts a new transformer to the Envelope cache of DEKs for future reads. +func (t *envelopeTransformer) addTransformer(encKey []byte, key []byte) (value.Transformer, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + transformer := t.baseTransformerFunc(block) + // Use base64 of encKey as the key into the cache because hashicorp/golang-lru + // cannot hash []uint8. + if t.cacheEnabled { + t.transformers.Add(base64.StdEncoding.EncodeToString(encKey), transformer) + dekCacheFillPercent.Set(float64(t.transformers.Len()) / float64(t.cacheSize)) + } + return transformer, nil +} + +// getTransformer fetches the transformer corresponding to encKey from cache, if it exists. +func (t *envelopeTransformer) getTransformer(encKey []byte) value.Transformer { + if !t.cacheEnabled { + return nil + } + + _transformer, found := t.transformers.Get(base64.StdEncoding.EncodeToString(encKey)) + if found { + return _transformer.(value.Transformer) + } + return nil +} + +// generateKey generates a random key using system randomness. +func generateKey(length int) (key []byte, err error) { + defer func(start time.Time) { + value.RecordDataKeyGeneration(start, err) + }(time.Now()) + key = make([]byte, length) + if _, err = rand.Read(key); err != nil { + return nil, err + } + + return key, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go new file mode 100644 index 000000000..7aa5d232f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go @@ -0,0 +1,181 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using a Envelope provider +package envelope + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + "time" + + "k8s.io/klog/v2" + + "google.golang.org/grpc" + + kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1" +) + +const ( + // Now only supported unix domain socket. + unixProtocol = "unix" + + // Current version for the protocol interface definition. + kmsapiVersion = "v1beta1" + + versionErrorf = "KMS provider api version %s is not supported, only %s is supported now" +) + +// The gRPC implementation for envelope.Service. +type gRPCService struct { + kmsClient kmsapi.KeyManagementServiceClient + connection *grpc.ClientConn + callTimeout time.Duration + mux sync.RWMutex + versionChecked bool +} + +// NewGRPCService returns an envelope.Service which use gRPC to communicate the remote KMS provider. 
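// Editorial aside, not part of the vendored file: a toy, deliberately insecure
// envelope.Service (it only base64-encodes the DEK) to show how a KMS provider
// plugs into NewEnvelopeTransformer above. Real deployments use the gRPC
// client defined in the following file; all other names here are invented.
package main

import (
	"encoding/base64"
	"fmt"

	"k8s.io/apiserver/pkg/storage/value"
	aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
	"k8s.io/apiserver/pkg/storage/value/encrypt/envelope"
)

type fakeKMS struct{}

func (fakeKMS) Encrypt(data []byte) ([]byte, error) {
	return []byte(base64.StdEncoding.EncodeToString(data)), nil
}

func (fakeKMS) Decrypt(data []byte) ([]byte, error) {
	return base64.StdEncoding.DecodeString(string(data))
}

func main() {
	// Cache up to 10 decrypted DEKs; wrap each DEK in an AES-GCM transformer.
	t, err := envelope.NewEnvelopeTransformer(fakeKMS{}, 10, aestransformer.NewGCMTransformer)
	if err != nil {
		panic(err)
	}

	ctx := value.DefaultContext([]byte("/registry/secrets/default/demo"))
	sealed, err := t.TransformToStorage([]byte("top secret"), ctx)
	if err != nil {
		panic(err)
	}
	plain, _, err := t.TransformFromStorage(sealed, ctx)
	fmt.Println(string(plain), err) // top secret <nil>
}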
+func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) { + klog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) + + addr, err := parseEndpoint(endpoint) + if err != nil { + return nil, err + } + + s := &gRPCService{callTimeout: callTimeout} + s.connection, err = grpc.Dial( + addr, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(s.interceptor), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + // Ignoring addr and timeout arguments: + // addr - comes from the closure + c, err := net.DialUnix(unixProtocol, nil, &net.UnixAddr{Name: addr}) + if err != nil { + klog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) + } else { + klog.V(4).Infof("Successfully dialed Unix socket %v", addr) + } + return c, err + })) + + if err != nil { + return nil, fmt.Errorf("failed to create connection to %s, error: %v", endpoint, err) + } + + s.kmsClient = kmsapi.NewKeyManagementServiceClient(s.connection) + return s, nil +} + +// Parse the endpoint to extract schema, host or path. +func parseEndpoint(endpoint string) (string, error) { + if len(endpoint) == 0 { + return "", fmt.Errorf("remote KMS provider can't use empty string as endpoint") + } + + u, err := url.Parse(endpoint) + if err != nil { + return "", fmt.Errorf("invalid endpoint %q for remote KMS provider, error: %v", endpoint, err) + } + + if u.Scheme != unixProtocol { + return "", fmt.Errorf("unsupported scheme %q for remote KMS provider", u.Scheme) + } + + // Linux abstract namespace socket - no physical file required + // Warning: Linux Abstract sockets have not concept of ACL (unlike traditional file based sockets). + // However, Linux Abstract sockets are subject to Linux networking namespace, so will only be accessible to + // containers within the same pod (unless host networking is used). + if strings.HasPrefix(u.Path, "/@") { + return strings.TrimPrefix(u.Path, "/"), nil + } + + return u.Path, nil +} + +func (g *gRPCService) checkAPIVersion(ctx context.Context) error { + g.mux.Lock() + defer g.mux.Unlock() + + if g.versionChecked { + return nil + } + + request := &kmsapi.VersionRequest{Version: kmsapiVersion} + response, err := g.kmsClient.Version(ctx, request) + if err != nil { + return fmt.Errorf("failed get version from remote KMS provider: %v", err) + } + if response.Version != kmsapiVersion { + return fmt.Errorf(versionErrorf, response.Version, kmsapiVersion) + } + g.versionChecked = true + + klog.V(4).Infof("Version of KMS provider is %s", response.Version) + return nil +} + +// Decrypt a given data string to obtain the original byte data. +func (g *gRPCService) Decrypt(cipher []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.DecryptRequest{Cipher: cipher, Version: kmsapiVersion} + response, err := g.kmsClient.Decrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Plain, nil +} + +// Encrypt bytes to a string ciphertext. 
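// Editorial aside, not part of the vendored file: how the gRPC KMS client above
// might be constructed by a hosting component. The socket path, payload and
// timeout are placeholders, and a KMS plugin is assumed to already be listening
// on that unix socket.
package main

import (
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/storage/value/encrypt/envelope"
)

func main() {
	// Only unix domain sockets are accepted; "unix:///@kms.sock" would select a
	// Linux abstract socket instead of a file-backed one.
	svc, err := envelope.NewGRPCService("unix:///var/run/kms-plugin/socket.sock", 3*time.Second)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	cipher, err := svc.Encrypt([]byte("dek-material"))
	fmt.Println(len(cipher), err)
}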
+func (g *gRPCService) Encrypt(plain []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.EncryptRequest{Plain: plain, Version: kmsapiVersion} + response, err := g.kmsClient.Encrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Cipher, nil +} + +func (g *gRPCService) interceptor( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, +) error { + if !kmsapi.IsVersionCheckMethod(method) { + if err := g.checkAPIVersion(ctx); err != nil { + return err + } + } + + return invoker(ctx, method, req, reply, cc, opts...) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go new file mode 100644 index 000000000..285ae14be --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envelope + +import ( + "sync" + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "envelope_encryption" + fromStorageLabel = "from_storage" + toStorageLabel = "to_storage" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. 
+ */ +var ( + lockLastFromStorage sync.Mutex + lockLastToStorage sync.Mutex + + lastFromStorage time.Time + lastToStorage time.Time + + dekCacheFillPercent = metrics.NewGauge( + &metrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_fill_percent", + Help: "Percent of the cache slots currently occupied by cached DEKs.", + StabilityLevel: metrics.ALPHA, + }, + ) + + dekCacheInterArrivals = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_inter_arrival_time_seconds", + Help: "Time (in seconds) of inter arrival of transformation requests.", + StabilityLevel: metrics.ALPHA, + Buckets: metrics.ExponentialBuckets(60, 2, 10), + }, + []string{"transformation_type"}, + ) +) + +var registerMetricsFunc sync.Once + +func registerMetrics() { + registerMetricsFunc.Do(func() { + legacyregistry.MustRegister(dekCacheFillPercent) + legacyregistry.MustRegister(dekCacheInterArrivals) + }) +} + +func recordArrival(transformationType string, start time.Time) { + switch transformationType { + case fromStorageLabel: + lockLastFromStorage.Lock() + defer lockLastFromStorage.Unlock() + + if lastFromStorage.IsZero() { + lastFromStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds()) + lastFromStorage = start + case toStorageLabel: + lockLastToStorage.Lock() + defer lockLastToStorage.Unlock() + + if lastToStorage.IsZero() { + lastToStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds()) + lastToStorage = start + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go new file mode 100644 index 000000000..0d71bb2ba --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go @@ -0,0 +1,502 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: service.proto + +package v1beta1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type VersionRequest struct { + // Version of the KMS plugin API. 
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (m *VersionRequest) String() string { return proto.CompactTextString(m) } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{0} +} +func (m *VersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionRequest.Unmarshal(m, b) +} +func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) +} +func (m *VersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionRequest.Merge(m, src) +} +func (m *VersionRequest) XXX_Size() int { + return xxx_messageInfo_VersionRequest.Size(m) +} +func (m *VersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionRequest proto.InternalMessageInfo + +func (m *VersionRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type VersionResponse struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the KMS provider. + RuntimeName string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName,proto3" json:"runtime_name,omitempty"` + // Version of the KMS provider. The string must be semver-compatible. + RuntimeVersion string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion,proto3" json:"runtime_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (m *VersionResponse) String() string { return proto.CompactTextString(m) } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{1} +} +func (m *VersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionResponse.Unmarshal(m, b) +} +func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) +} +func (m *VersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionResponse.Merge(m, src) +} +func (m *VersionResponse) XXX_Size() int { + return xxx_messageInfo_VersionResponse.Size(m) +} +func (m *VersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionResponse proto.InternalMessageInfo + +func (m *VersionResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetRuntimeName() string { + if m != nil { + return m.RuntimeName + } + return "" +} + +func (m *VersionResponse) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +type DecryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be decrypted. 
+ Cipher []byte `protobuf:"bytes,2,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptRequest) Reset() { *m = DecryptRequest{} } +func (m *DecryptRequest) String() string { return proto.CompactTextString(m) } +func (*DecryptRequest) ProtoMessage() {} +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{2} +} +func (m *DecryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptRequest.Unmarshal(m, b) +} +func (m *DecryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptRequest.Marshal(b, m, deterministic) +} +func (m *DecryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptRequest.Merge(m, src) +} +func (m *DecryptRequest) XXX_Size() int { + return xxx_messageInfo_DecryptRequest.Size(m) +} +func (m *DecryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptRequest proto.InternalMessageInfo + +func (m *DecryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *DecryptRequest) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +type DecryptResponse struct { + // The decrypted data. + Plain []byte `protobuf:"bytes,1,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptResponse) Reset() { *m = DecryptResponse{} } +func (m *DecryptResponse) String() string { return proto.CompactTextString(m) } +func (*DecryptResponse) ProtoMessage() {} +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{3} +} +func (m *DecryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptResponse.Unmarshal(m, b) +} +func (m *DecryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptResponse.Marshal(b, m, deterministic) +} +func (m *DecryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptResponse.Merge(m, src) +} +func (m *DecryptResponse) XXX_Size() int { + return xxx_messageInfo_DecryptResponse.Size(m) +} +func (m *DecryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptResponse proto.InternalMessageInfo + +func (m *DecryptResponse) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be encrypted. 
+ Plain []byte `protobuf:"bytes,2,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } +func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } +func (*EncryptRequest) ProtoMessage() {} +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{4} +} +func (m *EncryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptRequest.Unmarshal(m, b) +} +func (m *EncryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptRequest.Marshal(b, m, deterministic) +} +func (m *EncryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptRequest.Merge(m, src) +} +func (m *EncryptRequest) XXX_Size() int { + return xxx_messageInfo_EncryptRequest.Size(m) +} +func (m *EncryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptRequest proto.InternalMessageInfo + +func (m *EncryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *EncryptRequest) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptResponse struct { + // The encrypted data. + Cipher []byte `protobuf:"bytes,1,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } +func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } +func (*EncryptResponse) ProtoMessage() {} +func (*EncryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{5} +} +func (m *EncryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptResponse.Unmarshal(m, b) +} +func (m *EncryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptResponse.Marshal(b, m, deterministic) +} +func (m *EncryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptResponse.Merge(m, src) +} +func (m *EncryptResponse) XXX_Size() int { + return xxx_messageInfo_EncryptResponse.Size(m) +} +func (m *EncryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptResponse proto.InternalMessageInfo + +func (m *EncryptResponse) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +func init() { + proto.RegisterType((*VersionRequest)(nil), "v1beta1.VersionRequest") + proto.RegisterType((*VersionResponse)(nil), "v1beta1.VersionResponse") + proto.RegisterType((*DecryptRequest)(nil), "v1beta1.DecryptRequest") + proto.RegisterType((*DecryptResponse)(nil), "v1beta1.DecryptResponse") + proto.RegisterType((*EncryptRequest)(nil), "v1beta1.EncryptRequest") + proto.RegisterType((*EncryptResponse)(nil), "v1beta1.EncryptResponse") +} + +func init() { proto.RegisterFile("service.proto", fileDescriptor_a0b84a42fa06f626) } + +var fileDescriptor_a0b84a42fa06f626 = []byte{ + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x4a, 0xc4, 0x30, + 0x10, 0xde, 0xae, 0xb8, 0xc5, 0xb1, 0xb6, 0x10, 0x16, 0x2d, 0x9e, 0x34, 0x97, 0x55, 0x0f, 0x85, + 0xd5, 0xbb, 0x88, 0xe8, 0x49, 0xf4, 0x50, 0xc1, 0xab, 0x64, 0xcb, 
0xa0, 0x05, 0x9b, 0xc6, 0x24, + 0x5b, 0xd9, 0x17, 0xf5, 0x79, 0xc4, 0x66, 0x5a, 0xd3, 0x15, 0x71, 0x8f, 0x33, 0x99, 0xef, 0x6f, + 0x26, 0xb0, 0x67, 0x50, 0x37, 0x65, 0x81, 0x99, 0xd2, 0xb5, 0xad, 0x59, 0xd8, 0xcc, 0x17, 0x68, + 0xc5, 0x9c, 0x9f, 0x41, 0xfc, 0x84, 0xda, 0x94, 0xb5, 0xcc, 0xf1, 0x7d, 0x89, 0xc6, 0xb2, 0x14, + 0xc2, 0xc6, 0x75, 0xd2, 0xe0, 0x28, 0x38, 0xd9, 0xc9, 0xbb, 0x92, 0x7f, 0x40, 0xd2, 0xcf, 0x1a, + 0x55, 0x4b, 0x83, 0x7f, 0x0f, 0xb3, 0x63, 0x88, 0xf4, 0x52, 0xda, 0xb2, 0xc2, 0x67, 0x29, 0x2a, + 0x4c, 0xc7, 0xed, 0xf3, 0x2e, 0xf5, 0x1e, 0x44, 0x85, 0x6c, 0x06, 0x49, 0x37, 0xd2, 0x91, 0x6c, + 0xb5, 0x53, 0x31, 0xb5, 0x49, 0x8d, 0x5f, 0x43, 0x7c, 0x83, 0x85, 0x5e, 0x29, 0xfb, 0xaf, 0x49, + 0xb6, 0x0f, 0x93, 0xa2, 0x54, 0xaf, 0xa8, 0x5b, 0xc5, 0x28, 0xa7, 0x8a, 0xcf, 0x20, 0xe9, 0x39, + 0xc8, 0xfc, 0x14, 0xb6, 0xd5, 0x9b, 0x28, 0x1d, 0x45, 0x94, 0xbb, 0x82, 0x5f, 0x41, 0x7c, 0x2b, + 0x37, 0x14, 0xeb, 0x19, 0xc6, 0x3e, 0xc3, 0x29, 0x24, 0x3d, 0x03, 0x49, 0xfd, 0xb8, 0x0a, 0x7c, + 0x57, 0xe7, 0x9f, 0x01, 0x4c, 0xef, 0x70, 0x75, 0x2f, 0xa4, 0x78, 0xc1, 0x0a, 0xa5, 0x7d, 0x74, + 0x67, 0x62, 0x97, 0x10, 0x52, 0x7a, 0x76, 0x90, 0xd1, 0xb1, 0xb2, 0xe1, 0xa5, 0x0e, 0xd3, 0xdf, + 0x0f, 0x4e, 0x8e, 0x8f, 0xbe, 0xf1, 0x14, 0xd7, 0xc3, 0x0f, 0x97, 0xe8, 0xe1, 0xd7, 0x36, 0xe3, + 0xf0, 0x94, 0xc1, 0xc3, 0x0f, 0xf7, 0xe2, 0xe1, 0xd7, 0xe2, 0xf2, 0xd1, 0x62, 0xd2, 0xfe, 0xb3, + 0x8b, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x33, 0x8d, 0x09, 0xe1, 0x78, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) +} + +type keyManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewKeyManagementServiceClient(cc *grpc.ClientConn) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Decrypt", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(context.Context, *VersionRequest) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) +} + +// UnimplementedKeyManagementServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyManagementServiceServer struct { +} + +func (*UnimplementedKeyManagementServiceServer) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Decrypt(ctx context.Context, req *DecryptRequest) (*DecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Decrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Encrypt(ctx context.Context, req *EncryptRequest) (*EncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Version(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1beta1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _KeyManagementService_Version_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "service.proto", +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto new file mode 100644 index 000000000..b6c2f31c7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto @@ -0,0 +1,70 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// To regenerate service.pb.go run hack/update-generated-kms.sh +syntax = "proto3"; + +package v1beta1; + +// This service defines the public APIs for remote KMS provider. +service KeyManagementService { + // Version returns the runtime name and runtime version of the KMS provider. + rpc Version(VersionRequest) returns (VersionResponse) {} + + // Execute decryption operation in KMS provider. + rpc Decrypt(DecryptRequest) returns (DecryptResponse) {} + // Execute encryption operation in KMS provider. + rpc Encrypt(EncryptRequest) returns (EncryptResponse) {} +} + +message VersionRequest { + // Version of the KMS plugin API. + string version = 1; +} + +message VersionResponse { + // Version of the KMS plugin API. + string version = 1; + // Name of the KMS provider. + string runtime_name = 2; + // Version of the KMS provider. The string must be semver-compatible. + string runtime_version = 3; +} + +message DecryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be decrypted. + bytes cipher = 2; +} + +message DecryptResponse { + // The decrypted data. + bytes plain = 1; +} + +message EncryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be encrypted. + bytes plain = 2; +} + +message EncryptResponse { + // The encrypted data. + bytes cipher = 1; +} + diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go new file mode 100644 index 000000000..842d0a2fd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains definition of kms-plugin's gRPC service. +package v1beta1 + +// IsVersionCheckMethod determines whether the supplied method is a version check against kms-plugin. +func IsVersionCheckMethod(method string) bool { + return method == "/v1beta1.KeyManagementService/Version" +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go new file mode 100644 index 000000000..e322bd9b1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package identity + +import ( + "bytes" + "fmt" + + "k8s.io/apiserver/pkg/storage/value" +) + +// identityTransformer performs no transformation on provided data, but validates +// that the data is not encrypted data during TransformFromStorage +type identityTransformer struct{} + +// NewEncryptCheckTransformer returns an identityTransformer which returns an error +// on attempts to read encrypted data +func NewEncryptCheckTransformer() value.Transformer { + return identityTransformer{} +} + +// TransformFromStorage returns the input bytes if the data is not encrypted +func (identityTransformer) TransformFromStorage(b []byte, context value.Context) ([]byte, bool, error) { + // identityTransformer has to return an error if the data is encoded using another transformer. + // JSON data starts with '{'. Protobuf data has a prefix 'k8s[\x00-\xFF]'. + // Prefix 'k8s:enc:' is reserved for encrypted data on disk. + if bytes.HasPrefix(b, []byte("k8s:enc:")) { + return []byte{}, false, fmt.Errorf("identity transformer tried to read encrypted data") + } + return b, false, nil +} + +// TransformToStorage implements the Transformer interface for identityTransformer +func (identityTransformer) TransformToStorage(b []byte, context value.Context) ([]byte, error) { + return b, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go new file mode 100644 index 000000000..0eaa62824 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package secretbox transforms values for storage at rest using XSalsa20 and Poly1305. +package secretbox + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/nacl/secretbox" + + "k8s.io/apiserver/pkg/storage/value" +) + +// secretbox implements at rest encryption of the provided values given a 32 byte secret key. +// Uses a standard 24 byte nonce (placed at the beginning of the cipher text) generated +// from crypto/rand. Does not perform authentication of the data at rest. +type secretboxTransformer struct { + key [32]byte +} + +const nonceSize = 24 + +// NewSecretboxTransformer takes the given key and performs encryption and decryption on the given +// data. +func NewSecretboxTransformer(key [32]byte) value.Transformer { + return &secretboxTransformer{key: key} +} + +func (t *secretboxTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + if len(data) < (secretbox.Overhead + nonceSize) { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + var nonce [nonceSize]byte + copy(nonce[:], data[:nonceSize]) + data = data[nonceSize:] + out := make([]byte, 0, len(data)-secretbox.Overhead) + result, ok := secretbox.Open(out, data, &nonce, &t.key) + if !ok { + return nil, false, fmt.Errorf("output array was not large enough for encryption") + } + return result, false, nil +} + +func (t *secretboxTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + var nonce [nonceSize]byte + n, err := rand.Read(nonce[:]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + return secretbox.Seal(nonce[:], data, &nonce, &t.key), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go new file mode 100644 index 000000000..292cfcd90 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "sync" + "time" + + "google.golang.org/grpc/status" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "storage" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + transformerLatencies = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "transformation_duration_seconds", + Help: "Latencies in seconds of value transformation operations.", + // In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when + // external KMS is involved latencies may climb into milliseconds. + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), + StabilityLevel: metrics.ALPHA, + }, + []string{"transformation_type"}, + ) + + transformerOperationsTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "transformation_operations_total", + Help: "Total number of transformations.", + StabilityLevel: metrics.ALPHA, + }, + []string{"transformation_type", "transformer_prefix", "status"}, + ) + + envelopeTransformationCacheMissTotal = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "envelope_transformation_cache_misses_total", + Help: "Total number of cache misses while accessing key decryption key(KEK).", + StabilityLevel: metrics.ALPHA, + }, + ) + + dataKeyGenerationLatencies = metrics.NewHistogram( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "data_key_generation_duration_seconds", + Help: "Latencies in seconds of data encryption key(DEK) generation operations.", + Buckets: metrics.ExponentialBuckets(5e-6, 2, 14), + StabilityLevel: metrics.ALPHA, + }, + ) + + dataKeyGenerationFailuresTotal = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "data_key_generation_failures_total", + Help: "Total number of failed data encryption key(DEK) generation operations.", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +var registerMetrics sync.Once + +func RegisterMetrics() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(transformerLatencies) + legacyregistry.MustRegister(transformerOperationsTotal) + legacyregistry.MustRegister(envelopeTransformationCacheMissTotal) + legacyregistry.MustRegister(dataKeyGenerationLatencies) + legacyregistry.MustRegister(dataKeyGenerationFailuresTotal) + }) +} + +// RecordTransformation records latencies and count of TransformFromStorage and TransformToStorage operations. +// Note that transformation_failures_total metric is deprecated, use transformation_operations_total instead. 
+func RecordTransformation(transformationType, transformerPrefix string, start time.Time, err error) { + transformerOperationsTotal.WithLabelValues(transformationType, transformerPrefix, status.Code(err).String()).Inc() + + switch { + case err == nil: + transformerLatencies.WithLabelValues(transformationType).Observe(sinceInSeconds(start)) + } +} + +// RecordCacheMiss records a miss on Key Encryption Key(KEK) - call to KMS was required to decrypt KEK. +func RecordCacheMiss() { + envelopeTransformationCacheMissTotal.Inc() +} + +// RecordDataKeyGeneration records latencies and count of Data Encryption Key generation operations. +func RecordDataKeyGeneration(start time.Time, err error) { + if err != nil { + dataKeyGenerationFailuresTotal.Inc() + return + } + + dataKeyGenerationLatencies.Observe(sinceInSeconds(start)) +} + +// sinceInSeconds gets the time since the specified start in seconds. +func sinceInSeconds(start time.Time) float64 { + return time.Since(start).Seconds() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go new file mode 100644 index 000000000..3bc41cd9a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go @@ -0,0 +1,209 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package value contains methods for assisting with transformation of values in storage. +package value + +import ( + "bytes" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/errors" +) + +func init() { + RegisterMetrics() +} + +// Context is additional information that a storage transformation may need to verify the data at rest. +type Context interface { + // AuthenticatedData should return an array of bytes that describes the current value. If the value changes, + // the transformer may report the value as unreadable or tampered. This may be nil if no such description exists + // or is needed. For additional verification, set this to data that strongly identifies the value, such as + // the key and creation version of the stored data. + AuthenticatedData() []byte +} + +// Transformer allows a value to be transformed before being read from or written to the underlying store. The methods +// must be able to undo the transformation caused by the other. +type Transformer interface { + // TransformFromStorage may transform the provided data from its underlying storage representation or return an error. + // Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object + // have not changed. + TransformFromStorage(data []byte, context Context) (out []byte, stale bool, err error) + // TransformToStorage may transform the provided data into the appropriate form in storage or return an error. + TransformToStorage(data []byte, context Context) (out []byte, err error) +} + +type identityTransformer struct{} + +// IdentityTransformer performs no transformation of the provided data. 
+var IdentityTransformer Transformer = identityTransformer{} + +func (identityTransformer) TransformFromStorage(b []byte, ctx Context) ([]byte, bool, error) { + return b, false, nil +} +func (identityTransformer) TransformToStorage(b []byte, ctx Context) ([]byte, error) { + return b, nil +} + +// DefaultContext is a simple implementation of Context for a slice of bytes. +type DefaultContext []byte + +// AuthenticatedData returns itself. +func (c DefaultContext) AuthenticatedData() []byte { return []byte(c) } + +// MutableTransformer allows a transformer to be changed safely at runtime. +type MutableTransformer struct { + lock sync.RWMutex + transformer Transformer +} + +// NewMutableTransformer creates a transformer that can be updated at any time by calling Set() +func NewMutableTransformer(transformer Transformer) *MutableTransformer { + return &MutableTransformer{transformer: transformer} +} + +// Set updates the nested transformer. +func (t *MutableTransformer) Set(transformer Transformer) { + t.lock.Lock() + t.transformer = transformer + t.lock.Unlock() +} + +func (t *MutableTransformer) TransformFromStorage(data []byte, context Context) (out []byte, stale bool, err error) { + t.lock.RLock() + transformer := t.transformer + t.lock.RUnlock() + return transformer.TransformFromStorage(data, context) +} +func (t *MutableTransformer) TransformToStorage(data []byte, context Context) (out []byte, err error) { + t.lock.RLock() + transformer := t.transformer + t.lock.RUnlock() + return transformer.TransformToStorage(data, context) +} + +// PrefixTransformer holds a transformer interface and the prefix that the transformation is located under. +type PrefixTransformer struct { + Prefix []byte + Transformer Transformer +} + +type prefixTransformers struct { + transformers []PrefixTransformer + err error +} + +var _ Transformer = &prefixTransformers{} + +// NewPrefixTransformers supports the Transformer interface by checking the incoming data against the provided +// prefixes in order. The first matching prefix will be used to transform the value (the prefix is stripped +// before the Transformer interface is invoked). The first provided transformer will be used when writing to +// the store. +func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transformer { + if err == nil { + err = fmt.Errorf("the provided value does not match any of the supported transformers") + } + return &prefixTransformers{ + transformers: transformers, + err: err, + } +} + +// TransformFromStorage finds the first transformer with a prefix matching the provided data and returns +// the result of transforming the value. It will always mark any transformation as stale that is not using +// the first transformer. +func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) ([]byte, bool, error) { + start := time.Now() + var errs []error + for i, transformer := range t.transformers { + if bytes.HasPrefix(data, transformer.Prefix) { + result, stale, err := transformer.Transformer.TransformFromStorage(data[len(transformer.Prefix):], context) + // To migrate away from encryption, user can specify an identity transformer higher up + // (in the config file) than the encryption transformer. In that scenario, the identity transformer needs to + // identify (during reads from disk) whether the data being read is encrypted or not. If the data is encrypted, + // it shall throw an error, but that error should not prevent the next subsequent transformer from being tried. 
+ if len(transformer.Prefix) == 0 && err != nil { + continue + } + if len(transformer.Prefix) == 0 { + RecordTransformation("from_storage", "identity", start, err) + } else { + RecordTransformation("from_storage", string(transformer.Prefix), start, err) + } + + // It is valid to have overlapping prefixes when the same encryption provider + // is specified multiple times but with different keys (the first provider is + // being rotated to and some later provider is being rotated away from). + // + // Example: + // + // { + // "aescbc": { + // "keys": [ + // { + // "name": "2", + // "secret": "some key 2" + // } + // ] + // } + // }, + // { + // "aescbc": { + // "keys": [ + // { + // "name": "1", + // "secret": "some key 1" + // } + // ] + // } + // }, + // + // The transformers for both aescbc configs share the prefix k8s:enc:aescbc:v1: + // but a failure in the first one should not prevent a later match from being attempted. + // Thus we never short-circuit on a prefix match that results in an error. + if err != nil { + errs = append(errs, err) + continue + } + + return result, stale || i != 0, err + } + } + if err := errors.Reduce(errors.NewAggregate(errs)); err != nil { + return nil, false, err + } + RecordTransformation("from_storage", "unknown", start, t.err) + return nil, false, t.err +} + +// TransformToStorage uses the first transformer and adds its prefix to the data. +func (t *prefixTransformers) TransformToStorage(data []byte, context Context) ([]byte, error) { + start := time.Now() + transformer := t.transformers[0] + prefixedData := make([]byte, len(transformer.Prefix), len(data)+len(transformer.Prefix)) + copy(prefixedData, transformer.Prefix) + result, err := transformer.Transformer.TransformToStorage(data, context) + RecordTransformation("to_storage", string(transformer.Prefix), start, err) + if err != nil { + return nil, err + } + prefixedData = append(prefixedData, result...) + return prefixedData, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS b/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS new file mode 100644 index 000000000..ca9aa1358 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- caesarxuchao +- roycaihw diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/manager.go b/vendor/k8s.io/apiserver/pkg/storageversion/manager.go new file mode 100644 index 000000000..03e21a4d6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/manager.go @@ -0,0 +1,277 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
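To make the prefix matching described in the comments above concrete, here is a hedged sketch that chains a secretbox transformer with the identity check transformer, roughly mirroring the read path used while migrating to or from encryption at rest. The prefix string, the zero key, and the etcd key used as authenticated data are illustrative assumptions, not values taken from this patch.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/value"
	"k8s.io/apiserver/pkg/storage/value/encrypt/identity"
	"k8s.io/apiserver/pkg/storage/value/encrypt/secretbox"
)

func main() {
	var key [32]byte // a real key comes from the encryption configuration, never all zeros

	// Writes always use the first transformer; reads accept either an
	// encrypted value (matched by its prefix) or a legacy plaintext value.
	t := value.NewPrefixTransformers(nil,
		value.PrefixTransformer{Prefix: []byte("k8s:enc:secretbox:v1:key1:"), Transformer: secretbox.NewSecretboxTransformer(key)},
		value.PrefixTransformer{Prefix: []byte{}, Transformer: identity.NewEncryptCheckTransformer()},
	)

	ctx := value.DefaultContext([]byte("/registry/secrets/default/example"))
	stored, err := t.TransformToStorage([]byte("hello"), ctx)
	if err != nil {
		panic(err)
	}
	plain, stale, err := t.TransformFromStorage(stored, ctx)
	fmt.Printf("%s stale=%v err=%v\n", plain, stale, err) // hello stale=false err=<nil>
}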
+*/ + +package storageversion + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + _ "k8s.io/component-base/metrics/prometheus/workqueue" // for workqueue metric registration + "k8s.io/klog/v2" +) + +// ResourceInfo contains the information to register the resource to the +// storage version API. +type ResourceInfo struct { + GroupResource schema.GroupResource + + EncodingVersion string + // Used to calculate decodable versions. Can only be used after all + // equivalent versions are registered by InstallREST. + EquivalentResourceMapper runtime.EquivalentResourceRegistry +} + +// Manager records the resources whose StorageVersions need updates, and provides a method to update those StorageVersions. +type Manager interface { + // AddResourceInfo records resources whose StorageVersions need updates + AddResourceInfo(resources ...*ResourceInfo) + // UpdateStorageVersions tries to update the StorageVersions of the recorded resources + UpdateStorageVersions(kubeAPIServerClientConfig *rest.Config, apiserverID string) + // PendingUpdate returns true if the StorageVersion of the given resource is still pending update. + PendingUpdate(gr schema.GroupResource) bool + // LastUpdateError returns the last error hit when updating the storage version of the given resource. + LastUpdateError(gr schema.GroupResource) error + // Completed returns true if updating StorageVersions of all recorded resources has completed. + Completed() bool +} + +var _ Manager = &defaultManager{} + +// defaultManager indicates if an apiserver has completed reporting its storage versions. +type defaultManager struct { + completed atomic.Value + + mu sync.RWMutex + // managedResourceInfos records the ResourceInfos whose StorageVersions will get updated in the next + // UpdateStorageVersions call + managedResourceInfos map[*ResourceInfo]struct{} + // managedStatus records the update status of StorageVersion for each GroupResource. Since one + // ResourceInfo may expand into multiple GroupResource (e.g. ingresses.networking.k8s.io and ingresses.extensions), + // this map allows quick status lookup for a GroupResource, during API request handling. + managedStatus map[schema.GroupResource]*updateStatus +} + +type updateStatus struct { + done bool + lastErr error +} + +// NewDefaultManager creates a new defaultManager. +func NewDefaultManager() Manager { + s := &defaultManager{} + s.completed.Store(false) + s.managedResourceInfos = make(map[*ResourceInfo]struct{}) + s.managedStatus = make(map[schema.GroupResource]*updateStatus) + return s +} + +// AddResourceInfo adds ResourceInfo to the manager. 
+func (s *defaultManager) AddResourceInfo(resources ...*ResourceInfo) { + s.mu.Lock() + defer s.mu.Unlock() + for _, r := range resources { + s.managedResourceInfos[r] = struct{}{} + s.addPendingManagedStatusLocked(r) + } +} + +func (s *defaultManager) addPendingManagedStatusLocked(r *ResourceInfo) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + gr := gvr.GroupResource() + if _, ok := s.managedStatus[gr]; !ok { + s.managedStatus[gr] = &updateStatus{} + } + } +} + +// UpdateStorageVersions tries to update the StorageVersions of the recorded resources +func (s *defaultManager) UpdateStorageVersions(kubeAPIServerClientConfig *rest.Config, serverID string) { + clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get clientset: %v", err)) + return + } + sc := clientset.InternalV1alpha1().StorageVersions() + + s.mu.RLock() + resources := []ResourceInfo{} + for resource := range s.managedResourceInfos { + resources = append(resources, *resource) + } + s.mu.RUnlock() + hasFailure := false + // Sorting the list to make sure we have a consistent dedup result, and + // therefore avoid creating unnecessarily duplicated StorageVersion objects. + // For example, extensions.ingresses and networking.k8s.io.ingresses share + // the same underlying storage. Without sorting, in an HA cluster, one + // apiserver may dedup and update StorageVersion for extensions.ingresses, + // while another apiserver may dedup and update StorageVersion for + // networking.k8s.io.ingresses. The storage migrator (which migrates objects + // per GroupResource) will migrate these resources twice, since both + // StorageVersion objects have CommonEncodingVersion (each with one server registered). + sortResourceInfosByGroupResource(resources) + for _, r := range dedupResourceInfos(resources) { + dv := decodableVersions(r.EquivalentResourceMapper, r.GroupResource) + gr := r.GroupResource + // Group must be a valid subdomain in DNS (RFC 1123) + if len(gr.Group) == 0 { + gr.Group = "core" + } + if err := updateStorageVersionFor(sc, serverID, gr, r.EncodingVersion, dv); err != nil { + utilruntime.HandleError(fmt.Errorf("failed to update storage version for %v: %v", r.GroupResource, err)) + s.recordStatusFailure(&r, err) + hasFailure = true + continue + } + klog.V(2).Infof("successfully updated storage version for %v", r.GroupResource) + s.recordStatusSuccess(&r) + } + if hasFailure { + return + } + klog.V(2).Infof("storage version updates complete") + s.setComplete() +} + +// dedupResourceInfos dedups ResourceInfos with the same underlying storage. +// ResourceInfos from the same Group with different Versions share the same underlying storage. +// ResourceInfos from different Groups may share the same underlying storage, e.g. +// networking.k8s.io ingresses and extensions ingresses. The StorageVersion manager +// only needs to update one StorageVersion for the equivalent Groups. 
+func dedupResourceInfos(infos []ResourceInfo) []ResourceInfo { + var ret []ResourceInfo + seen := make(map[schema.GroupResource]struct{}) + for _, info := range infos { + gr := info.GroupResource + if _, ok := seen[gr]; ok { + continue + } + gvrs := info.EquivalentResourceMapper.EquivalentResourcesFor(gr.WithVersion(""), "") + for _, gvr := range gvrs { + seen[gvr.GroupResource()] = struct{}{} + } + ret = append(ret, info) + } + return ret +} + +func sortResourceInfosByGroupResource(infos []ResourceInfo) { + sort.Sort(byGroupResource(infos)) +} + +type byGroupResource []ResourceInfo + +func (s byGroupResource) Len() int { return len(s) } + +func (s byGroupResource) Less(i, j int) bool { + if s[i].GroupResource.Group == s[j].GroupResource.Group { + return s[i].GroupResource.Resource < s[j].GroupResource.Resource + } + return s[i].GroupResource.Group < s[j].GroupResource.Group +} + +func (s byGroupResource) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// recordStatusSuccess marks updated ResourceInfo as completed. +func (s *defaultManager) recordStatusSuccess(r *ResourceInfo) { + s.mu.Lock() + defer s.mu.Unlock() + s.recordStatusSuccessLocked(r) +} + +func (s *defaultManager) recordStatusSuccessLocked(r *ResourceInfo) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + s.recordSuccessGroupResourceLocked(gvr.GroupResource()) + } +} + +func (s *defaultManager) recordSuccessGroupResourceLocked(gr schema.GroupResource) { + if _, ok := s.managedStatus[gr]; !ok { + return + } + s.managedStatus[gr].done = true + s.managedStatus[gr].lastErr = nil +} + +// recordStatusFailure records latest error updating ResourceInfo. +func (s *defaultManager) recordStatusFailure(r *ResourceInfo, err error) { + s.mu.Lock() + defer s.mu.Unlock() + s.recordStatusFailureLocked(r, err) +} + +func (s *defaultManager) recordStatusFailureLocked(r *ResourceInfo, err error) { + gvrs := r.EquivalentResourceMapper.EquivalentResourcesFor(r.GroupResource.WithVersion(""), "") + for _, gvr := range gvrs { + s.recordErrorGroupResourceLocked(gvr.GroupResource(), err) + } +} + +func (s *defaultManager) recordErrorGroupResourceLocked(gr schema.GroupResource, err error) { + if _, ok := s.managedStatus[gr]; !ok { + return + } + s.managedStatus[gr].lastErr = err +} + +// PendingUpdate returns if the StorageVersion of a resource is still wait to be updated. +func (s *defaultManager) PendingUpdate(gr schema.GroupResource) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if _, ok := s.managedStatus[gr]; !ok { + return false + } + return !s.managedStatus[gr].done +} + +// LastUpdateError returns the last error hit when updating the storage version of the given resource. +func (s *defaultManager) LastUpdateError(gr schema.GroupResource) error { + s.mu.RLock() + defer s.mu.RUnlock() + if _, ok := s.managedStatus[gr]; !ok { + return fmt.Errorf("couldn't find managed status for %v", gr) + } + return s.managedStatus[gr].lastErr +} + +// setComplete marks the completion of updating StorageVersions. No write requests need to be blocked anymore. +func (s *defaultManager) setComplete() { + s.completed.Store(true) +} + +// Completed returns if updating StorageVersions has completed. 
+func (s *defaultManager) Completed() bool { + return s.completed.Load().(bool) +} + +func decodableVersions(e runtime.EquivalentResourceRegistry, gr schema.GroupResource) []string { + var versions []string + decodingGVRs := e.EquivalentResourcesFor(gr.WithVersion(""), "") + for _, v := range decodingGVRs { + versions = append(versions, v.GroupVersion().String()) + } + return versions +} diff --git a/vendor/k8s.io/apiserver/pkg/storageversion/updater.go b/vendor/k8s.io/apiserver/pkg/storageversion/updater.go new file mode 100644 index 000000000..10927fb0f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storageversion/updater.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storageversion + +import ( + "context" + "fmt" + "time" + + "k8s.io/api/apiserverinternal/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" +) + +// Client has the methods required to update the storage version. +type Client interface { + Create(context.Context, *v1alpha1.StorageVersion, metav1.CreateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(context.Context, *v1alpha1.StorageVersion, metav1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Get(context.Context, string, metav1.GetOptions) (*v1alpha1.StorageVersion, error) +} + +func setCommonEncodingVersion(sv *v1alpha1.StorageVersion) { + if len(sv.Status.StorageVersions) == 0 { + return + } + firstVersion := sv.Status.StorageVersions[0].EncodingVersion + agreed := true + for _, ssv := range sv.Status.StorageVersions { + if ssv.EncodingVersion != firstVersion { + agreed = false + break + } + } + if agreed { + sv.Status.CommonEncodingVersion = &firstVersion + } else { + sv.Status.CommonEncodingVersion = nil + } +} + +// updateStorageVersionFor updates the storage version object for the resource. 
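As a rough usage sketch (not taken from this patch), the Manager above is fed ResourceInfo entries when resources are installed and later asked to publish them. The deployments resource, the apps/v1 encoding version, and the deferred UpdateStorageVersions call are made-up examples; runtime.NewEquivalentResourceRegistry comes from k8s.io/apimachinery.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storageversion"
)

func main() {
	// Register the equivalence between the deployments resource and its kind,
	// so decodable versions can be derived for the StorageVersion object.
	reg := runtime.NewEquivalentResourceRegistry()
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	reg.RegisterKindFor(gvr, "", schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

	mgr := storageversion.NewDefaultManager()
	mgr.AddResourceInfo(&storageversion.ResourceInfo{
		GroupResource:            gvr.GroupResource(),
		EncodingVersion:          "apps/v1",
		EquivalentResourceMapper: reg,
	})

	// In a real apiserver the loopback rest.Config and this server's ID would
	// be supplied once available:
	//   mgr.UpdateStorageVersions(loopbackConfig, apiserverID)
	fmt.Println("pending:", mgr.PendingUpdate(gvr.GroupResource())) // true until updated
}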
+func updateStorageVersionFor(c Client, apiserverID string, gr schema.GroupResource, encodingVersion string, decodableVersions []string) error { + retries := 3 + var retry int + var err error + for retry < retries { + err = singleUpdate(c, apiserverID, gr, encodingVersion, decodableVersions) + if err == nil { + return nil + } + if apierrors.IsAlreadyExists(err) || apierrors.IsConflict(err) { + time.Sleep(1 * time.Second) + continue + } + if err != nil { + klog.Errorf("retry %d, failed to update storage version for %v: %v", retry, gr, err) + retry++ + time.Sleep(1 * time.Second) + } + } + return err +} + +func singleUpdate(c Client, apiserverID string, gr schema.GroupResource, encodingVersion string, decodableVersions []string) error { + shouldCreate := false + name := fmt.Sprintf("%s.%s", gr.Group, gr.Resource) + sv, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + if apierrors.IsNotFound(err) { + shouldCreate = true + sv = &v1alpha1.StorageVersion{} + sv.ObjectMeta.Name = name + } + updatedSV := localUpdateStorageVersion(sv, apiserverID, encodingVersion, decodableVersions) + if shouldCreate { + createdSV, err := c.Create(context.TODO(), updatedSV, metav1.CreateOptions{}) + if err != nil { + return err + } + // assign the calculated status to the object just created, then update status + createdSV.Status = updatedSV.Status + _, err = c.UpdateStatus(context.TODO(), createdSV, metav1.UpdateOptions{}) + return err + } + _, err = c.UpdateStatus(context.TODO(), updatedSV, metav1.UpdateOptions{}) + return err +} + +// localUpdateStorageVersion updates the input storageversion with given server storageversion info. +// The function updates the input storageversion in place. +func localUpdateStorageVersion(sv *v1alpha1.StorageVersion, apiserverID, encodingVersion string, decodableVersions []string) *v1alpha1.StorageVersion { + newSSV := v1alpha1.ServerStorageVersion{ + APIServerID: apiserverID, + EncodingVersion: encodingVersion, + DecodableVersions: decodableVersions, + } + foundSSV := false + for i, ssv := range sv.Status.StorageVersions { + if ssv.APIServerID == apiserverID { + sv.Status.StorageVersions[i] = newSSV + foundSSV = true + break + } + } + if !foundSSV { + sv.Status.StorageVersions = append(sv.Status.StorageVersions, newSSV) + } + setCommonEncodingVersion(sv) + return sv +} diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go new file mode 100644 index 000000000..905523c73 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apihelpers + +import ( + "sort" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" +) + +// SetFlowSchemaCondition sets conditions. 
+func SetFlowSchemaCondition(flowSchema *flowcontrol.FlowSchema, newCondition flowcontrol.FlowSchemaCondition) { + existingCondition := GetFlowSchemaConditionByType(flowSchema, newCondition.Type) + if existingCondition == nil { + flowSchema.Status.Conditions = append(flowSchema.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// GetFlowSchemaConditionByType gets conditions. +func GetFlowSchemaConditionByType(flowSchema *flowcontrol.FlowSchema, conditionType flowcontrol.FlowSchemaConditionType) *flowcontrol.FlowSchemaCondition { + for i := range flowSchema.Status.Conditions { + if flowSchema.Status.Conditions[i].Type == conditionType { + return &flowSchema.Status.Conditions[i] + } + } + return nil +} + +// SetPriorityLevelConfigurationCondition sets conditions. +func SetPriorityLevelConfigurationCondition(priorityLevel *flowcontrol.PriorityLevelConfiguration, newCondition flowcontrol.PriorityLevelConfigurationCondition) { + existingCondition := GetPriorityLevelConfigurationConditionByType(priorityLevel, newCondition.Type) + if existingCondition == nil { + priorityLevel.Status.Conditions = append(priorityLevel.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// GetPriorityLevelConfigurationConditionByType gets conditions. +func GetPriorityLevelConfigurationConditionByType(priorityLevel *flowcontrol.PriorityLevelConfiguration, conditionType flowcontrol.PriorityLevelConfigurationConditionType) *flowcontrol.PriorityLevelConfigurationCondition { + for i := range priorityLevel.Status.Conditions { + if priorityLevel.Status.Conditions[i].Type == conditionType { + return &priorityLevel.Status.Conditions[i] + } + } + return nil +} + +var _ sort.Interface = FlowSchemaSequence{} + +// FlowSchemaSequence holds sorted set of pointers to FlowSchema objects. +// FlowSchemaSequence implements `sort.Interface` +type FlowSchemaSequence []*flowcontrol.FlowSchema + +func (s FlowSchemaSequence) Len() int { + return len(s) +} + +func (s FlowSchemaSequence) Less(i, j int) bool { + // the flow-schema w/ lower matching-precedence is prior + if ip, jp := s[i].Spec.MatchingPrecedence, s[j].Spec.MatchingPrecedence; ip != jp { + return ip < jp + } + // sort alphabetically + return s[i].Name < s[j].Name +} + +func (s FlowSchemaSequence) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go b/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go new file mode 100644 index 000000000..3e28c2934 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/dryrun/dryrun.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dryrun + +// IsDryRun returns true if the DryRun flag is an actual dry-run. +func IsDryRun(flag []string) bool { + return len(flag) > 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go new file mode 100644 index 000000000..2f93df73d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -0,0 +1,764 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/pkg/errors" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + apitypes "k8s.io/apimachinery/pkg/types" + apierrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/apihelpers" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta1" +) + +// This file contains a simple local (to the apiserver) controller +// that digests API Priority and Fairness config objects (FlowSchema +// and PriorityLevelConfiguration) into the data structure that the +// filter uses. At this first level of development this controller +// takes the simplest possible approach: whenever notified of any +// change to any config object, or when any priority level that is +// undesired becomes completely unused, all the config objects are +// read and processed as a whole. + +// StartFunction begins the process of handlig a request. If the +// request gets queued then this function uses the given hashValue as +// the source of entropy as it shuffle-shards the request into a +// queue. The descr1 and descr2 values play no role in the logic but +// appear in log messages. This method does not return until the +// queuing, if any, for this request is done. If `execute` is false +// then `afterExecution` is irrelevant and the request should be +// rejected. 
Otherwise the request should be executed and +// `afterExecution` must be called exactly once. +type StartFunction func(ctx context.Context, hashValue uint64) (execute bool, afterExecution func()) + +// RequestDigest holds necessary info from request for flow-control +type RequestDigest struct { + RequestInfo *request.RequestInfo + User user.Info +} + +// `*configController` maintains eventual consistency with the API +// objects that configure API Priority and Fairness, and provides a +// procedural interface to the configured behavior. The methods of +// this type and cfgMeal follow the convention that the suffix +// "Locked" means that the caller must hold the configController lock. +type configController struct { + queueSetFactory fq.QueueSetFactory + obsPairGenerator metrics.TimedObserverPairGenerator + + // configQueue holds `(interface{})(0)` when the configuration + // objects need to be reprocessed. + configQueue workqueue.RateLimitingInterface + + plLister flowcontrollister.PriorityLevelConfigurationLister + plInformerSynced cache.InformerSynced + + fsLister flowcontrollister.FlowSchemaLister + fsInformerSynced cache.InformerSynced + + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface + + // serverConcurrencyLimit is the limit on the server's total + // number of non-exempt requests being served at once. This comes + // from server configuration. + serverConcurrencyLimit int + + // requestWaitLimit comes from server configuration. + requestWaitLimit time.Duration + + // This must be locked while accessing flowSchemas or + // priorityLevelStates. It is the lock involved in + // LockingWriteMultiple. + lock sync.Mutex + + // flowSchemas holds the flow schema objects, sorted by increasing + // numerical (decreasing logical) matching precedence. Every + // FlowSchema in this slice is immutable. + flowSchemas apihelpers.FlowSchemaSequence + + // priorityLevelStates maps the PriorityLevelConfiguration object + // name to the state for that level. Every name referenced from a + // member of `flowSchemas` has an entry here. + priorityLevelStates map[string]*priorityLevelState +} + +// priorityLevelState holds the state specific to a priority level. +type priorityLevelState struct { + // the API object or prototype prescribing this level. Nothing + // reached through this pointer is mutable. + pl *flowcontrol.PriorityLevelConfiguration + + // qsCompleter holds the QueueSetCompleter derived from `config` + // and `queues` if config is not exempt, nil otherwise. + qsCompleter fq.QueueSetCompleter + + // The QueueSet for this priority level. This is nil if and only + // if the priority level is exempt. + queues fq.QueueSet + + // quiescing==true indicates that this priority level should be + // removed when its queues have all drained. May be true only if + // queues is non-nil. 
+ quiescing bool + + // number of goroutines between Controller::Match and calling the + // returned StartFunction + numPending int + + // Observers tracking number waiting, executing + obsPair metrics.TimedObserverPair +} + +// NewTestableController is extra flexible to facilitate testing +func newTestableController( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, + obsPairGenerator metrics.TimedObserverPairGenerator, + queueSetFactory fq.QueueSetFactory, +) *configController { + cfgCtlr := &configController{ + queueSetFactory: queueSetFactory, + obsPairGenerator: obsPairGenerator, + serverConcurrencyLimit: serverConcurrencyLimit, + requestWaitLimit: requestWaitLimit, + flowcontrolClient: flowcontrolClient, + priorityLevelStates: make(map[string]*priorityLevelState), + } + klog.V(2).Infof("NewTestableController with serverConcurrencyLimit=%d, requestWaitLimit=%s", serverConcurrencyLimit, requestWaitLimit) + cfgCtlr.initializeConfigController(informerFactory) + // ensure the data structure reflects the mandatory config + cfgCtlr.lockAndDigestConfigObjects(nil, nil) + return cfgCtlr +} + +// initializeConfigController sets up the controller that processes +// config API objects. +func (cfgCtlr *configController) initializeConfigController(informerFactory kubeinformers.SharedInformerFactory) { + cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") + fci := informerFactory.Flowcontrol().V1beta1() + pli := fci.PriorityLevelConfigurations() + fsi := fci.FlowSchemas() + cfgCtlr.plLister = pli.Lister() + cfgCtlr.plInformerSynced = pli.Informer().HasSynced + cfgCtlr.fsLister = fsi.Lister() + cfgCtlr.fsInformerSynced = fsi.Informer().HasSynced + pli.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + pl := obj.(*flowcontrol.PriorityLevelConfiguration) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to creation of PLC %s", pl.Name) + cfgCtlr.configQueue.Add(0) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + newPL := newObj.(*flowcontrol.PriorityLevelConfiguration) + oldPL := oldObj.(*flowcontrol.PriorityLevelConfiguration) + if !apiequality.Semantic.DeepEqual(oldPL.Spec, newPL.Spec) { + klog.V(7).Infof("Triggered API priority and fairness config reloading due to spec update of PLC %s", newPL.Name) + cfgCtlr.configQueue.Add(0) + } + }, + DeleteFunc: func(obj interface{}) { + name, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to deletion of PLC %s", name) + cfgCtlr.configQueue.Add(0) + + }}) + fsi.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + fs := obj.(*flowcontrol.FlowSchema) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to creation of FS %s", fs.Name) + cfgCtlr.configQueue.Add(0) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + newFS := newObj.(*flowcontrol.FlowSchema) + oldFS := oldObj.(*flowcontrol.FlowSchema) + if !apiequality.Semantic.DeepEqual(oldFS.Spec, newFS.Spec) { + klog.V(7).Infof("Triggered API priority and fairness config reloading due to spec update of FS %s", newFS.Name) + cfgCtlr.configQueue.Add(0) + } + }, + DeleteFunc: func(obj interface{}) { + name, _ := 
cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + klog.V(7).Infof("Triggered API priority and fairness config reloading due to deletion of FS %s", name) + cfgCtlr.configQueue.Add(0) + + }}) +} + +// MaintainObservations keeps the observers from +// metrics.PriorityLevelConcurrencyObserverPairGenerator from falling +// too far behind +func (cfgCtlr *configController) MaintainObservations(stopCh <-chan struct{}) { + wait.Until(cfgCtlr.updateObservations, 10*time.Second, stopCh) +} + +func (cfgCtlr *configController) updateObservations() { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + for _, plc := range cfgCtlr.priorityLevelStates { + if plc.queues != nil { + plc.queues.UpdateObservations() + } + } +} + +func (cfgCtlr *configController) Run(stopCh <-chan struct{}) error { + defer cfgCtlr.configQueue.ShutDown() + klog.Info("Starting API Priority and Fairness config controller") + if ok := cache.WaitForCacheSync(stopCh, cfgCtlr.plInformerSynced, cfgCtlr.fsInformerSynced); !ok { + return fmt.Errorf("Never achieved initial sync") + } + klog.Info("Running API Priority and Fairness config worker") + wait.Until(cfgCtlr.runWorker, time.Second, stopCh) + klog.Info("Shutting down API Priority and Fairness config worker") + return nil +} + +func (cfgCtlr *configController) runWorker() { + for cfgCtlr.processNextWorkItem() { + } +} + +func (cfgCtlr *configController) processNextWorkItem() bool { + obj, shutdown := cfgCtlr.configQueue.Get() + if shutdown { + return false + } + + func(obj interface{}) { + defer cfgCtlr.configQueue.Done(obj) + if !cfgCtlr.syncOne() { + cfgCtlr.configQueue.AddRateLimited(obj) + } else { + cfgCtlr.configQueue.Forget(obj) + } + }(obj) + + return true +} + +// syncOne attempts to sync all the API Priority and Fairness config +// objects. It either succeeds and returns `true` or logs an error +// and returns `false`. +func (cfgCtlr *configController) syncOne() bool { + all := labels.Everything() + newPLs, err := cfgCtlr.plLister.List(all) + if err != nil { + klog.Errorf("Unable to list PriorityLevelConfiguration objects: %s", err.Error()) + return false + } + newFSs, err := cfgCtlr.fsLister.List(all) + if err != nil { + klog.Errorf("Unable to list FlowSchema objects: %s", err.Error()) + return false + } + err = cfgCtlr.digestConfigObjects(newPLs, newFSs) + if err == nil { + return true + } + klog.Error(err) + return false +} + +// cfgMeal is the data involved in the process of digesting the API +// objects that configure API Priority and Fairness. All the config +// objects are digested together, because this is the simplest way to +// cope with the various dependencies between objects. The process of +// digestion is done in four passes over config objects --- three +// passes over PriorityLevelConfigurations and one pass over the +// FlowSchemas --- with the work dvided among the passes according to +// those dependencies. +type cfgMeal struct { + cfgCtlr *configController + + newPLStates map[string]*priorityLevelState + + // The sum of the concurrency shares of the priority levels in the + // new configuration + shareSum float64 + + // These keep track of which mandatory priority level config + // objects have been digested + haveExemptPL, haveCatchAllPL bool + + // Buffered FlowSchema status updates to do. Do them when the + // lock is not held, to avoid a deadlock due to such a request + // provoking a call into this controller while the lock held + // waiting on that request to complete. 
+ fsStatusUpdates []fsStatusUpdate +} + +// A buffered set of status updates for a FlowSchema +type fsStatusUpdate struct { + flowSchema *flowcontrol.FlowSchema + condition flowcontrol.FlowSchemaCondition + oldValue flowcontrol.FlowSchemaCondition +} + +// digestConfigObjects is given all the API objects that configure +// cfgCtlr and writes its consequent new configState. +func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema) error { + fsStatusUpdates := cfgCtlr.lockAndDigestConfigObjects(newPLs, newFSs) + var errs []error + for _, fsu := range fsStatusUpdates { + enc, err := json.Marshal(fsu.condition) + if err != nil { + // should never happen because these conditions are created here and well formed + panic(fmt.Sprintf("Failed to json.Marshall(%#+v): %s", fsu.condition, err.Error())) + } + klog.V(4).Infof("Writing Condition %s to FlowSchema %s because its previous value was %s", string(enc), fsu.flowSchema.Name, fcfmt.Fmt(fsu.oldValue)) + _, err = cfgCtlr.flowcontrolClient.FlowSchemas().Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, []byte(fmt.Sprintf(`{"status": {"conditions": [ %s ] } }`, string(enc))), metav1.PatchOptions{FieldManager: "api-priority-and-fairness-config-consumer-v1"}, "status") + if err != nil { + errs = append(errs, errors.Wrap(err, fmt.Sprintf("failed to set a status.condition for FlowSchema %s", fsu.flowSchema.Name))) + } + } + if len(errs) == 0 { + return nil + } + return apierrors.NewAggregate(errs) +} + +func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema) []fsStatusUpdate { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + meal := cfgMeal{ + cfgCtlr: cfgCtlr, + newPLStates: make(map[string]*priorityLevelState), + } + + meal.digestNewPLsLocked(newPLs) + meal.digestFlowSchemasLocked(newFSs) + meal.processOldPLsLocked() + + // Supply missing mandatory PriorityLevelConfiguration objects + if !meal.haveExemptPL { + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + } + if !meal.haveCatchAllPL { + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + } + + meal.finishQueueSetReconfigsLocked() + + // The new config has been constructed + cfgCtlr.priorityLevelStates = meal.newPLStates + klog.V(5).Infof("Switched to new API Priority and Fairness configuration") + return meal.fsStatusUpdates +} + +// Digest the new set of PriorityLevelConfiguration objects. +// Pretend broken ones do not exist. 
+func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfiguration) { + for _, pl := range newPLs { + state := meal.cfgCtlr.priorityLevelStates[pl.Name] + if state == nil { + state = &priorityLevelState{obsPair: meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{pl.Name})} + } + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.obsPair) + if err != nil { + klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) + continue + } + meal.newPLStates[pl.Name] = state + state.pl = pl + state.qsCompleter = qsCompleter + if state.quiescing { // it was undesired, but no longer + klog.V(3).Infof("Priority level %q was undesired and has become desired again", pl.Name) + state.quiescing = false + } + if state.pl.Spec.Limited != nil { + meal.shareSum += float64(state.pl.Spec.Limited.AssuredConcurrencyShares) + } + meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt + meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll + } +} + +// Digest the given FlowSchema objects. Ones that reference a missing +// or broken priority level are not to be passed on to the filter for +// use. We do this before holding over old priority levels so that +// requests stop going to those levels and FlowSchemaStatus values +// reflect this. This function also adds any missing mandatory +// FlowSchema objects. The given objects must all have distinct +// names. +func (meal *cfgMeal) digestFlowSchemasLocked(newFSs []*flowcontrol.FlowSchema) { + fsSeq := make(apihelpers.FlowSchemaSequence, 0, len(newFSs)) + fsMap := make(map[string]*flowcontrol.FlowSchema, len(newFSs)) + var haveExemptFS, haveCatchAllFS bool + for i, fs := range newFSs { + otherFS := fsMap[fs.Name] + if otherFS != nil { + // This client is forbidden to do this. + panic(fmt.Sprintf("Given two FlowSchema objects with the same name: %s and %s", fcfmt.Fmt(otherFS), fcfmt.Fmt(fs))) + } + fsMap[fs.Name] = fs + _, goodPriorityRef := meal.newPLStates[fs.Spec.PriorityLevelConfiguration.Name] + + // Ensure the object's status reflects whether its priority + // level reference is broken. + // + // TODO: consider not even trying if server is not handling + // requests yet. + meal.presyncFlowSchemaStatus(fs, !goodPriorityRef, fs.Spec.PriorityLevelConfiguration.Name) + + if !goodPriorityRef { + klog.V(6).Infof("Ignoring FlowSchema %s because of bad priority level reference %q", fs.Name, fs.Spec.PriorityLevelConfiguration.Name) + continue + } + fsSeq = append(fsSeq, newFSs[i]) + haveExemptFS = haveExemptFS || fs.Name == flowcontrol.FlowSchemaNameExempt + haveCatchAllFS = haveCatchAllFS || fs.Name == flowcontrol.FlowSchemaNameCatchAll + } + // sort into the order to be used for matching + sort.Sort(fsSeq) + + // Supply missing mandatory FlowSchemas, in correct position + if !haveExemptFS { + fsSeq = append(apihelpers.FlowSchemaSequence{fcboot.MandatoryFlowSchemaExempt}, fsSeq...) + } + if !haveCatchAllFS { + fsSeq = append(fsSeq, fcboot.MandatoryFlowSchemaCatchAll) + } + + meal.cfgCtlr.flowSchemas = fsSeq + if klog.V(5).Enabled() { + for _, fs := range fsSeq { + klog.Infof("Using FlowSchema %s", fcfmt.Fmt(fs)) + } + } +} + +// Consider all the priority levels in the previous configuration. 
+// Keep the ones that are in the new config, supply mandatory +// behavior, or are still busy; for the rest: drop it if it has no +// queues, otherwise start the quiescing process if that has not +// already been started. +func (meal *cfgMeal) processOldPLsLocked() { + for plName, plState := range meal.cfgCtlr.priorityLevelStates { + if meal.newPLStates[plName] != nil { + // Still desired and already updated + continue + } + if plName == flowcontrol.PriorityLevelConfigurationNameExempt && !meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll && !meal.haveCatchAllPL { + // BTW, we know the Spec has not changed because the + // mandatory objects have immutable Specs + klog.V(3).Infof("Retaining mandatory priority level %q despite lack of API object", plName) + } else { + if plState.queues == nil || plState.numPending == 0 && plState.queues.IsIdle() { + // Either there are no queues or they are done + // draining and no use is coming from another + // goroutine + klog.V(3).Infof("Removing undesired priority level %q (nilQueues=%v), Type=%v", plName, plState.queues == nil, plState.pl.Spec.Type) + continue + } + if !plState.quiescing { + klog.V(3).Infof("Priority level %q became undesired", plName) + plState.quiescing = true + } + } + var err error + plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.obsPair) + if err != nil { + // This can not happen because queueSetCompleterForPL already approved this config + panic(fmt.Sprintf("%s from name=%q spec=%s", err, plName, fcfmt.Fmt(plState.pl.Spec))) + } + if plState.pl.Spec.Limited != nil { + // We deliberately include the lingering priority levels + // here so that their queues get some concurrency and they + // continue to drain. During this interim a lingering + // priority level continues to get a concurrency + // allocation determined by all the share values in the + // regular way. + meal.shareSum += float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) + } + meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt + meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll + meal.newPLStates[plName] = plState + } +} + +// For all the priority levels of the new config, divide up the +// server's total concurrency limit among them and create/update their +// QueueSets. +func (meal *cfgMeal) finishQueueSetReconfigsLocked() { + for plName, plState := range meal.newPLStates { + if plState.pl.Spec.Limited == nil { + klog.V(5).Infof("Using exempt priority level %q: quiescing=%v", plName, plState.quiescing) + continue + } + + // The use of math.Ceil here means that the results might sum + // to a little more than serverConcurrencyLimit but the + // difference will be negligible. 
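+		// As an illustrative example (the numbers are hypothetical, not
+		// taken from any real configuration): with serverConcurrencyLimit=601
+		// and three limited levels that each have AssuredConcurrencyShares=1
+		// (so shareSum=3), every level gets ceil(601*1/3) = 201 and the
+		// per-level limits sum to 603, only slightly above the server-wide
+		// limit.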
+ concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) / meal.shareSum)) + metrics.UpdateSharedConcurrencyLimit(plName, concurrencyLimit) + + if plState.queues == nil { + klog.V(5).Infof("Introducing queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum) + } else { + klog.V(5).Infof("Retaining queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum) + } + plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: concurrencyLimit}) + } +} + +// queueSetCompleterForPL returns an appropriate QueueSetCompleter for the +// given priority level configuration. Returns nil if that config +// does not call for limiting. Returns nil and an error if the given +// object is malformed in a way that is a problem for this package. +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, intPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { + if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Spec.Limited == nil) { + return nil, errors.New("broken union structure at the top") + } + if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt) { + // This package does not attempt to cope with a priority level dynamically switching between exempt and not. 
+ return nil, errors.New("non-alignment between name and type") + } + if pl.Spec.Limited == nil { + return nil, nil + } + if (pl.Spec.Limited.LimitResponse.Type == flowcontrol.LimitResponseTypeReject) != (pl.Spec.Limited.LimitResponse.Queuing == nil) { + return nil, errors.New("broken union structure for limit response") + } + qcAPI := pl.Spec.Limited.LimitResponse.Queuing + qcQS := fq.QueuingConfig{Name: pl.Name} + if qcAPI != nil { + qcQS = fq.QueuingConfig{Name: pl.Name, + DesiredNumQueues: int(qcAPI.Queues), + QueueLengthLimit: int(qcAPI.QueueLengthLimit), + HandSize: int(qcAPI.HandSize), + RequestWaitLimit: requestWaitLimit, + } + } + var qsc fq.QueueSetCompleter + var err error + if queues != nil { + qsc, err = queues.BeginConfigChange(qcQS) + } else { + qsc, err = qsf.BeginConstruction(qcQS, intPair) + } + if err != nil { + err = errors.Wrap(err, fmt.Sprintf("priority level %q has QueuingConfiguration %#+v, which is invalid", pl.Name, qcAPI)) + } + return qsc, err +} + +func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangling bool, plName string) { + danglingCondition := apihelpers.GetFlowSchemaConditionByType(fs, flowcontrol.FlowSchemaConditionDangling) + if danglingCondition == nil { + danglingCondition = &flowcontrol.FlowSchemaCondition{ + Type: flowcontrol.FlowSchemaConditionDangling, + } + } + desiredStatus := flowcontrol.ConditionFalse + var desiredReason, desiredMessage string + if isDangling { + desiredStatus = flowcontrol.ConditionTrue + desiredReason = "NotFound" + desiredMessage = fmt.Sprintf("This FlowSchema references the PriorityLevelConfiguration object named %q but there is no such object", plName) + } else { + desiredReason = "Found" + desiredMessage = fmt.Sprintf("This FlowSchema references the PriorityLevelConfiguration object named %q and it exists", plName) + } + if danglingCondition.Status == desiredStatus && danglingCondition.Reason == desiredReason && danglingCondition.Message == desiredMessage { + return + } + meal.fsStatusUpdates = append(meal.fsStatusUpdates, fsStatusUpdate{ + flowSchema: fs, + condition: flowcontrol.FlowSchemaCondition{ + Type: flowcontrol.FlowSchemaConditionDangling, + Status: desiredStatus, + LastTransitionTime: metav1.Now(), + Reason: desiredReason, + Message: desiredMessage, + }, + oldValue: *danglingCondition}) +} + +// imaginePL adds a priority level based on one of the mandatory ones +// that does not actually exist (right now) as a real API object. +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { + klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) + obsPair := meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{proto.Name}) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, obsPair) + if err != nil { + // This can not happen because proto is one of the mandatory + // objects and these are not erroneous + panic(err) + } + meal.newPLStates[proto.Name] = &priorityLevelState{ + pl: proto, + qsCompleter: qsCompleter, + obsPair: obsPair, + } + if proto.Spec.Limited != nil { + meal.shareSum += float64(proto.Spec.Limited.AssuredConcurrencyShares) + } + return +} + +type immediateRequest struct{} + +func (immediateRequest) Finish(execute func()) bool { + execute() + return false +} + +// startRequest classifies and, if appropriate, enqueues the request. +// Returns a nil Request if and only if the request is to be rejected. 
+// The returned bool indicates whether the request is exempt from +// limitation. The startWaitingTime is when the request started +// waiting in its queue, or `Time{}` if this did not happen. +func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDigest, queueNoteFn fq.QueueNoteFn) (fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, isExempt bool, req fq.Request, startWaitingTime time.Time) { + klog.V(7).Infof("startRequest(%#+v)", rd) + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + var selectedFlowSchema *flowcontrol.FlowSchema + for _, fs := range cfgCtlr.flowSchemas { + if matchesFlowSchema(rd, fs) { + selectedFlowSchema = fs + break + } + } + if selectedFlowSchema == nil { + // This should never happen. If the requestDigest's User is a part of + // system:authenticated or system:unauthenticated, the catch-all flow + // schema should match it. However, if that invariant somehow fails, + // fallback to the catch-all flow schema anyway. + for _, fs := range cfgCtlr.flowSchemas { + if fs.Name == flowcontrol.FlowSchemaNameCatchAll { + selectedFlowSchema = fs + break + } + } + if selectedFlowSchema == nil { + // This should absolutely never, ever happen! APF guarantees two + // undeletable flow schemas at all times: an exempt flow schema and a + // catch-all flow schema. + panic(fmt.Sprintf("no fallback catch-all flow schema found for request %#+v and user %#+v", rd.RequestInfo, rd.User)) + } + klog.Warningf("no match found for request %#+v and user %#+v; selecting catchAll=%s as fallback flow schema", rd.RequestInfo, rd.User, fcfmt.Fmt(selectedFlowSchema)) + } + plName := selectedFlowSchema.Spec.PriorityLevelConfiguration.Name + plState := cfgCtlr.priorityLevelStates[plName] + if plState.pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt { + klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, immediate", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName) + return selectedFlowSchema, plState.pl, true, immediateRequest{}, time.Time{} + } + var numQueues int32 + if plState.pl.Spec.Limited.LimitResponse.Type == flowcontrol.LimitResponseTypeQueue { + numQueues = plState.pl.Spec.Limited.LimitResponse.Queuing.Queues + } + var flowDistinguisher string + var hashValue uint64 + if numQueues > 1 { + flowDistinguisher = computeFlowDistinguisher(rd, selectedFlowSchema.Spec.DistinguisherMethod) + hashValue = hashFlowID(selectedFlowSchema.Name, flowDistinguisher) + } + startWaitingTime = time.Now() + klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, numQueues=%d", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName, numQueues) + req, idle := plState.queues.StartRequest(ctx, hashValue, flowDistinguisher, selectedFlowSchema.Name, rd.RequestInfo, rd.User, queueNoteFn) + if idle { + cfgCtlr.maybeReapLocked(plName, plState) + } + return selectedFlowSchema, plState.pl, false, req, startWaitingTime +} + +// Call this after getting a clue that the given priority level is undesired and idle +func (cfgCtlr *configController) maybeReap(plName string) { + cfgCtlr.lock.Lock() + defer cfgCtlr.lock.Unlock() + plState := cfgCtlr.priorityLevelStates[plName] + if plState == nil { + klog.V(7).Infof("plName=%s, plState==nil", plName) + return + } + if plState.queues != nil { + useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle() + klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, 
plState.numPending, useless) + if !useless { + return + } + } + klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName) + cfgCtlr.configQueue.Add(0) +} + +// Call this if both (1) plState.queues is non-nil and reported being +// idle, and (2) cfgCtlr's lock has not been released since then. +func (cfgCtlr *configController) maybeReapLocked(plName string, plState *priorityLevelState) { + if !(plState.quiescing && plState.numPending == 0) { + return + } + klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName) + cfgCtlr.configQueue.Add(0) +} + +// computeFlowDistinguisher extracts the flow distinguisher according to the given method +func computeFlowDistinguisher(rd RequestDigest, method *flowcontrol.FlowDistinguisherMethod) string { + if method == nil { + return "" + } + switch method.Type { + case flowcontrol.FlowDistinguisherMethodByUserType: + return rd.User.GetName() + case flowcontrol.FlowDistinguisherMethodByNamespaceType: + return rd.RequestInfo.Namespace + default: + // this line shall never reach + panic("invalid flow-distinguisher method") + } +} + +func hashFlowID(fsName, fDistinguisher string) uint64 { + hash := sha256.New() + var sep = [1]byte{0} + hash.Write([]byte(fsName)) + hash.Write(sep[:]) + hash.Write([]byte(fDistinguisher)) + var sum [32]byte + hash.Sum(sum[:0]) + return binary.LittleEndian.Uint64(sum[:8]) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go new file mode 100644 index 000000000..3c2c9fc74 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go @@ -0,0 +1,268 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "text/tabwriter" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/mux" +) + +const ( + queryIncludeRequestDetails = "includeRequestDetails" +) + +func (cfgCtlr *configController) Install(c *mux.PathRecorderMux) { + // TODO(yue9944882): handle "Accept" header properly + // debugging dumps a CSV content for three levels of granularity + // 1. row per priority-level + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_priority_levels", cfgCtlr.dumpPriorityLevels) + // 2. row per queue + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_queues", cfgCtlr.dumpQueues) + // 3. 
row per request
+	c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_requests", cfgCtlr.dumpRequests)
+}
+
+func (cfgCtlr *configController) dumpPriorityLevels(w http.ResponseWriter, r *http.Request) {
+	cfgCtlr.lock.Lock()
+	defer cfgCtlr.lock.Unlock()
+	tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0)
+	columnHeaders := []string{
+		"PriorityLevelName", // 1
+		"ActiveQueues",      // 2
+		"IsIdle",            // 3
+		"IsQuiescing",       // 4
+		"WaitingRequests",   // 5
+		"ExecutingRequests", // 6
+	}
+	tabPrint(tabWriter, rowForHeaders(columnHeaders))
+	endLine(tabWriter)
+	for _, plState := range cfgCtlr.priorityLevelStates {
+		if plState.queues == nil {
+			tabPrint(tabWriter, row(
+				plState.pl.Name, // 1
+				"",              // 2
+				"",              // 3
+				"",              // 4
+				"",              // 5
+				"",              // 6
+			))
+			endLine(tabWriter)
+			continue
+		}
+		queueSetDigest := plState.queues.Dump(false)
+		activeQueueNum := 0
+		for _, q := range queueSetDigest.Queues {
+			if len(q.Requests) > 0 {
+				activeQueueNum++
+			}
+		}
+
+		tabPrint(tabWriter, rowForPriorityLevel(
+			plState.pl.Name,          // 1
+			activeQueueNum,           // 2
+			plState.queues.IsIdle(),  // 3
+			plState.quiescing,        // 4
+			queueSetDigest.Waiting,   // 5
+			queueSetDigest.Executing, // 6
+		))
+		endLine(tabWriter)
+	}
+	runtime.HandleError(tabWriter.Flush())
+}
+
+func (cfgCtlr *configController) dumpQueues(w http.ResponseWriter, r *http.Request) {
+	cfgCtlr.lock.Lock()
+	defer cfgCtlr.lock.Unlock()
+	tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0)
+	columnHeaders := []string{
+		"PriorityLevelName", // 1
+		"Index",             // 2
+		"PendingRequests",   // 3
+		"ExecutingRequests", // 4
+		"VirtualStart",      // 5
+	}
+	tabPrint(tabWriter, rowForHeaders(columnHeaders))
+	endLine(tabWriter)
+	for _, plState := range cfgCtlr.priorityLevelStates {
+		if plState.queues == nil {
+			tabPrint(tabWriter, row(
+				plState.pl.Name, // 1
+				"",              // 2
+				"",              // 3
+				"",              // 4
+				"",              // 5
+			))
+			endLine(tabWriter)
+			continue
+		}
+		queueSetDigest := plState.queues.Dump(false)
+		for i, q := range queueSetDigest.Queues {
+			tabPrint(tabWriter, rowForQueue(
+				plState.pl.Name,     // 1
+				i,                   // 2
+				len(q.Requests),     // 3
+				q.ExecutingRequests, // 4
+				q.VirtualStart,      // 5
+			))
+			endLine(tabWriter)
+		}
+	}
+	runtime.HandleError(tabWriter.Flush())
+}
+
+func (cfgCtlr *configController) dumpRequests(w http.ResponseWriter, r *http.Request) {
+	cfgCtlr.lock.Lock()
+	defer cfgCtlr.lock.Unlock()
+
+	includeRequestDetails := len(r.URL.Query().Get(queryIncludeRequestDetails)) > 0
+
+	tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0)
+	tabPrint(tabWriter, rowForHeaders([]string{
+		"PriorityLevelName",   // 1
+		"FlowSchemaName",      // 2
+		"QueueIndex",          // 3
+		"RequestIndexInQueue", // 4
+		"FlowDistinguisher",   // 5
+		"ArriveTime",          // 6
+	}))
+	if includeRequestDetails {
+		continueLine(tabWriter)
+		tabPrint(tabWriter, rowForHeaders([]string{
+			"UserName",    // 7
+			"Verb",        // 8
+			"APIPath",     // 9
+			"Namespace",   // 10
+			"Name",        // 11
+			"APIVersion",  // 12
+			"Resource",    // 13
+			"SubResource", // 14
+		}))
+	}
+	endLine(tabWriter)
+	for _, plState := range cfgCtlr.priorityLevelStates {
+		if plState.queues == nil {
+			continue
+		}
+		queueSetDigest := plState.queues.Dump(includeRequestDetails)
+		for iq, q := range queueSetDigest.Queues {
+			for ir, r := range q.Requests {
+				tabPrint(tabWriter, rowForRequest(
+					plState.pl.Name,     // 1
+					r.MatchedFlowSchema, // 2
+					iq,                  // 3
+					ir,                  // 4
+					r.FlowDistinguisher, // 5
+					r.ArriveTime,        // 6
+				))
+				if includeRequestDetails {
+					continueLine(tabWriter)
+					tabPrint(tabWriter, rowForRequestDetails(
+						r.UserName,         // 7
+						r.RequestInfo.Verb, // 8
+						
r.RequestInfo.Path, // 9 + r.RequestInfo.Namespace, // 10 + r.RequestInfo.Name, // 11 + schema.GroupVersion{ + Group: r.RequestInfo.APIGroup, + Version: r.RequestInfo.APIVersion, + }.String(), // 12 + r.RequestInfo.Resource, // 13 + r.RequestInfo.Subresource, // 14 + )) + } + endLine(tabWriter) + } + } + } + runtime.HandleError(tabWriter.Flush()) +} + +func tabPrint(w io.Writer, row string) { + _, err := fmt.Fprint(w, row) + runtime.HandleError(err) +} + +func continueLine(w io.Writer) { + _, err := fmt.Fprint(w, ",\t") + runtime.HandleError(err) +} +func endLine(w io.Writer) { + _, err := fmt.Fprint(w, "\n") + runtime.HandleError(err) +} + +func rowForHeaders(headers []string) string { + return row(headers...) +} + +func rowForPriorityLevel(plName string, activeQueues int, isIdle, isQuiescing bool, waitingRequests, executingRequests int) string { + return row( + plName, + strconv.Itoa(activeQueues), + strconv.FormatBool(isIdle), + strconv.FormatBool(isQuiescing), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + ) +} + +func rowForQueue(plName string, index, waitingRequests, executingRequests int, virtualStart float64) string { + return row( + plName, + strconv.Itoa(index), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + fmt.Sprintf("%.4f", virtualStart), + ) +} + +func rowForRequest(plName, fsName string, queueIndex, requestIndex int, flowDistinguisher string, arriveTime time.Time) string { + return row( + plName, + fsName, + strconv.Itoa(queueIndex), + strconv.Itoa(requestIndex), + flowDistinguisher, + arriveTime.UTC().Format(time.RFC3339Nano), + ) +} + +func rowForRequestDetails(username, verb, path, namespace, name, apiVersion, resource, subResource string) string { + return row( + username, + verb, + path, + namespace, + name, + apiVersion, + resource, + subResource, + ) +} + +func row(columns ...string) string { + return strings.Join(columns, ",\t") +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go new file mode 100644 index 000000000..327d50c14 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -0,0 +1,132 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "context" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/server/mux" + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + fqs "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/klog/v2" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" +) + +// Interface defines how the API Priority and Fairness filter interacts with the underlying system. 
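+//
+// A minimal usage sketch (illustrative only; the caller-side names and the
+// concurrency/wait values below are assumptions, not taken from this file):
+//
+//	apf := flowcontrol.New(informerFactory, flowcontrolClient, 600, 15*time.Second)
+//	go apf.MaintainObservations(stopCh)
+//	go apf.Run(stopCh)
+//	apf.Handle(ctx, digest, noteFn, queueNoteFn, func() {
+//		// serve the request here
+//	})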
+type Interface interface { + // Handle takes care of queuing and dispatching a request + // characterized by the given digest. The given `noteFn` will be + // invoked with the results of request classification. If the + // request is queued then `queueNoteFn` will be called twice, + // first with `true` and then with `false`; otherwise + // `queueNoteFn` will not be called at all. If Handle decides + // that the request should be executed then `execute()` will be + // invoked once to execute the request; otherwise `execute()` will + // not be invoked. + Handle(ctx context.Context, + requestDigest RequestDigest, + noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + queueNoteFn fq.QueueNoteFn, + execFn func(), + ) + + // MaintainObservations is a helper for maintaining statistics. + MaintainObservations(stopCh <-chan struct{}) + + // Run monitors config objects from the main apiservers and causes + // any needed changes to local behavior. This method ceases + // activity and returns after the given channel is closed. + Run(stopCh <-chan struct{}) error + + // Install installs debugging endpoints to the web-server. + Install(c *mux.PathRecorderMux) +} + +// This request filter implements https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md + +// New creates a new instance to implement API priority and fairness +func New( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, +) Interface { + grc := counter.NoOp{} + return NewTestable( + informerFactory, + flowcontrolClient, + serverConcurrencyLimit, + requestWaitLimit, + metrics.PriorityLevelConcurrencyObserverPairGenerator, + fqs.NewQueueSetFactory(&clock.RealClock{}, grc), + ) +} + +// NewTestable is extra flexible to facilitate testing +func NewTestable( + informerFactory kubeinformers.SharedInformerFactory, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + serverConcurrencyLimit int, + requestWaitLimit time.Duration, + obsPairGenerator metrics.TimedObserverPairGenerator, + queueSetFactory fq.QueueSetFactory, +) Interface { + return newTestableController(informerFactory, flowcontrolClient, serverConcurrencyLimit, requestWaitLimit, obsPairGenerator, queueSetFactory) +} + +func (cfgCtlr *configController) Handle(ctx context.Context, requestDigest RequestDigest, + noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + queueNoteFn fq.QueueNoteFn, + execFn func()) { + fs, pl, isExempt, req, startWaitingTime := cfgCtlr.startRequest(ctx, requestDigest, queueNoteFn) + queued := startWaitingTime != time.Time{} + noteFn(fs, pl) + if req == nil { + if queued { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, reject", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt) + return + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued) + var executed bool + idle := req.Finish(func() { + if queued { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + metrics.AddDispatch(pl.Name, fs.Name) + executed = true + 
startExecutionTime := time.Now() + execFn() + metrics.ObserveExecutionDuration(pl.Name, fs.Name, time.Since(startExecutionTime)) + }) + if queued && !executed { + metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) + } + klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => idle=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, idle) + if idle { + cfgCtlr.maybeReap(pl.Name) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go new file mode 100644 index 000000000..0418e1217 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package counter + +// GoRoutineCounter keeps track of the number of active goroutines +// working on/for something. This is a utility that makes such code more +// testable. The code uses this utility to report the number of active +// goroutines to the test code, so that the test code can advance a fake +// clock when and only when the code being tested has finished all +// the work that is ready to do at the present time. +type GoRoutineCounter interface { + // Add adds the given delta to the count of active goroutines. + // Call Add(1) before forking a goroutine, Add(-1) at the end of that goroutine. + // Call Add(-1) just before waiting on something from another goroutine (e.g., + // just before a `select`). + // Call Add(1) just before doing something that unblocks a goroutine that is + // waiting on that something. + Add(delta int) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go new file mode 100644 index 000000000..fa946f6f0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package counter + +// NoOp is a GoRoutineCounter that does not actually count +type NoOp struct{} + +var _ GoRoutineCounter = NoOp{} + +// Add would adjust the count, if a count were being kept +func (NoOp) Add(int) {} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go new file mode 100644 index 000000000..5e4467649 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "time" + + "k8s.io/apiserver/pkg/endpoints/request" +) + +// QueueSetDump is an instant dump of queue-set. +type QueueSetDump struct { + Queues []QueueDump + Waiting int + Executing int +} + +// QueueDump is an instant dump of one queue in a queue-set. +type QueueDump struct { + Requests []RequestDump + VirtualStart float64 + ExecutingRequests int +} + +// RequestDump is an instant dump of one requests pending in the queue. +type RequestDump struct { + MatchedFlowSchema string + FlowDistinguisher string + ArriveTime time.Time + StartTime time.Time + // request details + UserName string + RequestInfo request.RequestInfo +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go new file mode 100644 index 000000000..dcba6f2c2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fairqueuing + +import ( + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" +) + +// Integrator computes the moments of some variable X over time as +// read from a particular clock. The integrals start when the +// Integrator is created, and ends at the latest operation on the +// Integrator. As a `metrics.TimedObserver` this fixes X1=1 and +// ignores attempts to change X1. 
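+// For example (an illustrative use, not a requirement of the interface): an
+// Integrator that tracks the number of requests currently executing gets
+// Add(1) when a request starts and Add(-1) when it finishes; the Average in
+// its results is then the time-weighted mean number of requests in flight
+// over the measured interval.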
+type Integrator interface { + metrics.TimedObserver + + GetResults() IntegratorResults + + // Return the results of integrating to now, and reset integration to start now + Reset() IntegratorResults +} + +// IntegratorResults holds statistical abstracts of the integration +type IntegratorResults struct { + Duration float64 //seconds + Average float64 //time-weighted + Deviation float64 //standard deviation: sqrt(avg((value-avg)^2)) + Min, Max float64 +} + +// Equal tests for semantic equality. +// This considers all NaN values to be equal to each other. +func (x *IntegratorResults) Equal(y *IntegratorResults) bool { + return x == y || x != nil && y != nil && x.Duration == y.Duration && x.Min == y.Min && x.Max == y.Max && (x.Average == y.Average || math.IsNaN(x.Average) && math.IsNaN(y.Average)) && (x.Deviation == y.Deviation || math.IsNaN(x.Deviation) && math.IsNaN(y.Deviation)) +} + +type integrator struct { + clock clock.PassiveClock + sync.Mutex + lastTime time.Time + x float64 + moments Moments + min, max float64 +} + +// NewIntegrator makes one that uses the given clock +func NewIntegrator(clock clock.PassiveClock) Integrator { + return &integrator{ + clock: clock, + lastTime: clock.Now(), + } +} + +func (igr *integrator) SetX1(x1 float64) { +} + +func (igr *integrator) Set(x float64) { + igr.Lock() + igr.setLocked(x) + igr.Unlock() +} + +func (igr *integrator) setLocked(x float64) { + igr.updateLocked() + igr.x = x + if x < igr.min { + igr.min = x + } + if x > igr.max { + igr.max = x + } +} + +func (igr *integrator) Add(deltaX float64) { + igr.Lock() + igr.setLocked(igr.x + deltaX) + igr.Unlock() +} + +func (igr *integrator) updateLocked() { + now := igr.clock.Now() + dt := now.Sub(igr.lastTime).Seconds() + igr.lastTime = now + igr.moments = igr.moments.Add(ConstantMoments(dt, igr.x)) +} + +func (igr *integrator) GetResults() IntegratorResults { + igr.Lock() + defer igr.Unlock() + return igr.getResultsLocked() +} + +func (igr *integrator) Reset() IntegratorResults { + igr.Lock() + defer igr.Unlock() + results := igr.getResultsLocked() + igr.moments = Moments{} + igr.min = igr.x + igr.max = igr.x + return results +} + +func (igr *integrator) getResultsLocked() (results IntegratorResults) { + igr.updateLocked() + results.Min, results.Max = igr.min, igr.max + results.Duration = igr.moments.ElapsedSeconds + results.Average, results.Deviation = igr.moments.AvgAndStdDev() + return +} + +// Moments are the integrals of the 0, 1, and 2 powers of some +// variable X over some range of time. 
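+// As an illustrative example (made-up numbers): if X is 2 for 3 seconds and
+// then 4 for 1 second, the Moments are ElapsedSeconds=4, IntegralX=2*3+4*1=10
+// and IntegralXX=4*3+16*1=28, giving a time-weighted average of 10/4=2.5 and
+// a standard deviation of sqrt(28/4 - 2.5*2.5) = sqrt(0.75), about 0.87.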
+type Moments struct { + ElapsedSeconds float64 // integral of dt + IntegralX float64 // integral of x dt + IntegralXX float64 // integral of x*x dt +} + +// ConstantMoments is for a constant X +func ConstantMoments(dt, x float64) Moments { + return Moments{ + ElapsedSeconds: dt, + IntegralX: x * dt, + IntegralXX: x * x * dt, + } +} + +// Add combines over two ranges of time +func (igr Moments) Add(ogr Moments) Moments { + return Moments{ + ElapsedSeconds: igr.ElapsedSeconds + ogr.ElapsedSeconds, + IntegralX: igr.IntegralX + ogr.IntegralX, + IntegralXX: igr.IntegralXX + ogr.IntegralXX, + } +} + +// Sub finds the difference between a range of time and a subrange +func (igr Moments) Sub(ogr Moments) Moments { + return Moments{ + ElapsedSeconds: igr.ElapsedSeconds - ogr.ElapsedSeconds, + IntegralX: igr.IntegralX - ogr.IntegralX, + IntegralXX: igr.IntegralXX - ogr.IntegralXX, + } +} + +// AvgAndStdDev returns the average and standard devation +func (igr Moments) AvgAndStdDev() (float64, float64) { + if igr.ElapsedSeconds <= 0 { + return math.NaN(), math.NaN() + } + avg := igr.IntegralX / igr.ElapsedSeconds + // standard deviation is sqrt( average( (x - xbar)^2 ) ) + // = sqrt( Integral( x^2 + xbar^2 -2*x*xbar dt ) / Duration ) + // = sqrt( ( Integral( x^2 dt ) + Duration * xbar^2 - 2*xbar*Integral(x dt) ) / Duration) + // = sqrt( Integral(x^2 dt)/Duration - xbar^2 ) + variance := igr.IntegralXX/igr.ElapsedSeconds - avg*avg + if variance >= 0 { + return avg, math.Sqrt(variance) + } + return avg, math.NaN() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go new file mode 100644 index 000000000..882a505c8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fairqueuing + +import ( + "context" + "time" + + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" +) + +// QueueSetFactory is used to create QueueSet objects. Creation, like +// config update, is done in two phases: the first phase consumes the +// QueuingConfig and the second consumes the DispatchingConfig. They +// are separated so that errors from the first phase can be found +// before committing to a concurrency allotment for the second. +type QueueSetFactory interface { + // BeginConstruction does the first phase of creating a QueueSet + BeginConstruction(QueuingConfig, metrics.TimedObserverPair) (QueueSetCompleter, error) +} + +// QueueSetCompleter finishes the two-step process of creating or +// reconfiguring a QueueSet +type QueueSetCompleter interface { + // Complete returns a QueueSet configured by the given + // dispatching configuration. + Complete(DispatchingConfig) QueueSet +} + +// QueueSet is the abstraction for the queuing and dispatching +// functionality of one non-exempt priority level. 
It covers the +// functionality described in the "Assignment to a Queue", "Queuing", +// and "Dispatching" sections of +// https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md +// . Some day we may have connections between priority levels, but +// today is not that day. +type QueueSet interface { + // BeginConfigChange starts the two-step process of updating the + // configuration. No change is made until Complete is called. If + // `C := X.BeginConstruction(q)` then `C.Complete(d)` returns the + // same value `X`. If the QueuingConfig's DesiredNumQueues field + // is zero then the other queuing-specific config parameters are + // not changed, so that the queues continue draining as before. + // In any case, reconfiguration does not discard any queue unless + // and until it is undesired and empty. + BeginConfigChange(QueuingConfig) (QueueSetCompleter, error) + + // IsIdle returns a bool indicating whether the QueueSet was idle + // at the moment of the return. Idle means the QueueSet has zero + // requests queued and zero executing. This bit can change only + // (1) during a call to StartRequest and (2) during a call to + // Request::Finish. In the latter case idleness can only change + // from false to true. + IsIdle() bool + + // StartRequest begins the process of handling a request. If the + // request gets queued and the number of queues is greater than 1 + // then StartRequest uses the given hashValue as the source of + // entropy as it shuffle-shards the request into a queue. The + // descr1 and descr2 values play no role in the logic but appear + // in log messages. This method always returns quickly (without + // waiting for the request to be dequeued). If this method + // returns a nil Request value then caller should reject the + // request and the returned bool indicates whether the QueueSet + // was idle at the moment of the return. Otherwise idle==false + // and the client must call the Finish method of the Request + // exactly once. + StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn QueueNoteFn) (req Request, idle bool) + + // UpdateObservations makes sure any time-based statistics have + // caught up with the current clock reading + UpdateObservations() + + // Dump saves and returns the instant internal state of the queue-set. + // Note that dumping process will stop the queue-set from proceeding + // any requests. + // For debugging only. + Dump(includeRequestDetails bool) debug.QueueSetDump +} + +// QueueNoteFn is called when a request enters and leaves a queue +type QueueNoteFn func(inQueue bool) + +// Request represents the remainder of the handling of one request +type Request interface { + // Finish determines whether to execute or reject the request and + // invokes `execute` if the decision is to execute the request. + // The returned `idle bool` value indicates whether the QueueSet + // was idle when the value was calculated, but might no longer be + // accurate by the time the client examines that value. + Finish(execute func()) (idle bool) +} + +// QueuingConfig defines the configuration of the queuing aspect of a QueueSet. +type QueuingConfig struct { + // Name is used to identify a queue set, allowing for descriptive information about its intended use + Name string + + // DesiredNumQueues is the number of queues that the API says + // should exist now. 
This may be zero, in which case + // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + DesiredNumQueues int + + // QueueLengthLimit is the maximum number of requests that may be waiting in a given queue at a time + QueueLengthLimit int + + // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly + // dealing a "hand" of this many queues and then picking one of minimum length. + HandSize int + + // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. + // If, by the end of that time, the request has not been dispatched then it is rejected. + RequestWaitLimit time.Duration +} + +// DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. +type DispatchingConfig struct { + // ConcurrencyLimit is the maximum number of requests of this QueueSet that may be executing at a time + ConcurrencyLimit int +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go new file mode 100644 index 000000000..1977f7522 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go @@ -0,0 +1,129 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package promise + +// This file defines interfaces for promises and futures and related +// things. These are about coordination among multiple goroutines and +// so are safe for concurrent calls --- although moderated in some +// cases by a requirement that the caller hold a certain lock. + +// Readable represents a variable that is initially not set and later +// becomes set. Some instances may be set to multiple values in +// series. A Readable for a variable that can only get one value is +// commonly known as a "future". +type Readable interface { + // Get reads the current value of this variable. If this variable + // is not set yet then this call blocks until this variable gets a + // value. + Get() interface{} + + // IsSet returns immediately with an indication of whether this + // variable has been set. + IsSet() bool +} + +// LockingReadable is a Readable whose implementation is protected by +// a lock +type LockingReadable interface { + Readable + + // GetLocked is like Get but the caller must already hold the + // lock. GetLocked may release, and later re-acquire, the lock + // any number of times. Get may acquire, and later release, the + // lock any number of times. + GetLocked() interface{} + + // IsSetLocked is like IsSet but the caller must already hold the + // lock. IsSetLocked may release, and later re-acquire, the lock + // any number of times. IsSet may acquire, and later release, the + // lock any number of times. + IsSetLocked() bool +} + +// WriteOnceOnly represents a variable that is initially not set and +// can be set once. 
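+// For example (describing an intended use, not an additional requirement):
+// the queueset package can deliver a request's decision -- execute, reject
+// or cancel -- through such a variable: the dispatcher Sets it once and the
+// goroutine handling the request Gets it, blocking until the value arrives.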
+type WriteOnceOnly interface { + // Set normally writes a value into this variable, unblocks every + // goroutine waiting for this variable to have a value, and + // returns true. In the unhappy case that this variable is + // already set, this method returns false without modifying the + // variable's value. + Set(interface{}) bool +} + +// WriteOnce represents a variable that is initially not set and can +// be set once and is readable. This is the common meaning for +// "promise". +type WriteOnce interface { + Readable + WriteOnceOnly +} + +// LockingWriteOnceOnly is a WriteOnceOnly whose implementation is +// protected by a lock. +type LockingWriteOnceOnly interface { + WriteOnceOnly + + // SetLocked is like Set but the caller must already hold the + // lock. SetLocked may release, and later re-acquire, the lock + // any number of times. Set may acquire, and later release, the + // lock any number of times + SetLocked(interface{}) bool +} + +// LockingWriteOnce is a WriteOnce whose implementation is protected +// by a lock. +type LockingWriteOnce interface { + LockingReadable + LockingWriteOnceOnly +} + +// WriteMultipleOnly represents a variable that is initially not set +// and can be set one or more times (unlike a traditional "promise", +// which can be written only once). +type WriteMultipleOnly interface { + // Set writes a value into this variable and unblocks every + // goroutine waiting for this variable to have a value + Set(interface{}) +} + +// WriteMultiple represents a variable that is initially not set and +// can be set one or more times (unlike a traditional "promise", which +// can be written only once) and is readable. +type WriteMultiple interface { + Readable + WriteMultipleOnly +} + +// LockingWriteMultipleOnly is a WriteMultipleOnly whose +// implementation is protected by a lock. +type LockingWriteMultipleOnly interface { + WriteMultipleOnly + + // SetLocked is like Set but the caller must already hold the + // lock. SetLocked may release, and later re-acquire, the lock + // any number of times. Set may acquire, and later release, the + // lock any number of times + SetLocked(interface{}) +} + +// LockingWriteMultiple is a WriteMultiple whose implementation is +// protected by a lock. +type LockingWriteMultiple interface { + LockingReadable + LockingWriteMultipleOnly +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go new file mode 100644 index 000000000..db5598f89 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go @@ -0,0 +1,124 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lockingpromise + +import ( + "sync" + + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" +) + +// promisoid is the data and behavior common to all the promise-like +// abstractions implemented here. This implementation is based on a +// condition variable. This implementation tracks active goroutines: +// the given counter is decremented for a goroutine waiting for this +// varible to be set and incremented when such a goroutine is +// unblocked. +type promisoid struct { + lock sync.Locker + cond sync.Cond + activeCounter counter.GoRoutineCounter // counter of active goroutines + waitingCount int // number of goroutines idle due to this being unset + isSet bool + value interface{} +} + +func (pr *promisoid) Get() interface{} { + pr.lock.Lock() + defer pr.lock.Unlock() + return pr.GetLocked() +} + +func (pr *promisoid) GetLocked() interface{} { + if !pr.isSet { + pr.waitingCount++ + pr.activeCounter.Add(-1) + pr.cond.Wait() + } + return pr.value +} + +func (pr *promisoid) IsSet() bool { + pr.lock.Lock() + defer pr.lock.Unlock() + return pr.IsSetLocked() +} + +func (pr *promisoid) IsSetLocked() bool { + return pr.isSet +} + +func (pr *promisoid) SetLocked(value interface{}) { + pr.isSet = true + pr.value = value + if pr.waitingCount > 0 { + pr.activeCounter.Add(pr.waitingCount) + pr.waitingCount = 0 + pr.cond.Broadcast() + } +} + +type writeOnce struct { + promisoid +} + +var _ promise.LockingWriteOnce = &writeOnce{} + +// NewWriteOnce makes a new promise.LockingWriteOnce +func NewWriteOnce(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteOnce { + return &writeOnce{promisoid{ + lock: lock, + cond: *sync.NewCond(lock), + activeCounter: activeCounter, + }} +} + +func (wr *writeOnce) Set(value interface{}) bool { + wr.lock.Lock() + defer wr.lock.Unlock() + return wr.SetLocked(value) +} + +func (wr *writeOnce) SetLocked(value interface{}) bool { + if wr.isSet { + return false + } + wr.promisoid.SetLocked(value) + return true +} + +type writeMultiple struct { + promisoid +} + +var _ promise.LockingWriteMultiple = &writeMultiple{} + +// NewWriteMultiple makes a new promise.LockingWriteMultiple +func NewWriteMultiple(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteMultiple { + return &writeMultiple{promisoid{ + lock: lock, + cond: *sync.NewCond(lock), + activeCounter: activeCounter, + }} +} + +func (wr *writeMultiple) Set(value interface{}) { + wr.lock.Lock() + defer wr.lock.Unlock() + wr.SetLocked(value) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go new file mode 100644 index 000000000..840d78ea1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package queueset implements a technique called "fair queuing for
+// server requests". One QueueSet is a set of queues operating
+// according to this technique.
+//
+// Fair queuing for server requests is inspired by the fair queuing
+// technique from the world of networking. You can find a good paper
+// on that at https://dl.acm.org/citation.cfm?doid=75247.75248 or
+// http://people.csail.mit.edu/imcgraw/links/research/pubs/networks/WFQ.pdf
+// and there is an implementation outline in the Wikipedia article at
+// https://en.wikipedia.org/wiki/Fair_queuing .
+//
+// Fair queuing for server requests differs from traditional fair
+// queuing in three ways: (1) we are dispatching application layer
+// requests to a server rather than transmitting packets on a network
+// link, (2) multiple requests can be executing at once, and (3) the
+// service time (execution duration) is not known until the execution
+// completes.
+//
+// The first two differences can easily be handled by straightforward
+// adaptation of the concept called "R(t)" in the original paper and
+// "virtual time" in the implementation outline. In that
+// implementation outline, the notation now() is used to mean reading
+// the virtual clock. In the original paper’s terms, "R(t)" is the
+// number of "rounds" that have been completed at real time t ---
+// where a round consists of virtually transmitting one bit from every
+// non-empty queue in the router (regardless of which queue holds the
+// packet that is really being transmitted at the moment); in this
+// conception, a packet is considered to be "in" its queue until the
+// packet’s transmission is finished. For our problem, we can define a
+// round to be giving one nanosecond of CPU to every non-empty queue
+// in the apiserver (where emptiness is judged based on both queued
+// and executing requests from that queue), and define R(t) = (server
+// start time) + (1 ns) * (number of rounds since server start). Let
+// us write NEQ(t) for that number of non-empty queues in the
+// apiserver at time t. Let us also write C for the concurrency
+// limit. In the original paper, the partial derivative of R(t) with
+// respect to t is
+//
+// 1 / NEQ(t) .
+//
+// To generalize from transmitting one packet at a time to executing C
+// requests at a time, that derivative becomes
+//
+// C / NEQ(t) .
+//
+// However, sometimes there are fewer than C requests available to
+// execute. For a given queue "q", let us also write "reqs(q, t)" for
+// the number of requests of that queue that are executing at that
+// time. The total number of requests executing is sum[over q]
+// reqs(q, t) and if that is less than C then virtual time is not
+// advancing as fast as it would if all C seats were occupied; in this
+// case the numerator of the quotient in that derivative should be
+// adjusted proportionally. Putting it all together for fair queuing
+// for server requests: at a particular time t, the partial derivative
+// of R(t) with respect to t is
+//
+// min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
+//
+// In terms of the implementation outline, this is the rate at which
+// virtual time is advancing at time t (in virtual nanoseconds per
+// real nanosecond). Where the networking implementation outline adds
+// packet size to a virtual time, in our version this corresponds to
+// adding a service time (i.e., duration) to virtual time.
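+//
+// As an illustrative example (the numbers here are made up): if the
+// concurrency limit is C=10 and at time t there are NEQ(t)=4 non-empty
+// queues with a total of sum[over q] reqs(q, t) = 6 requests executing,
+// then virtual time advances at min(10, 6)/4 = 1.5 virtual nanoseconds
+// per real nanosecond.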
+// +// The third difference is handled by modifying the algorithm to +// dispatch based on an initial guess at the request’s service time +// (duration) and then make the corresponding adjustments once the +// request’s actual service time is known. This is similar, although +// not exactly isomorphic, to the original paper’s adjustment by +// `$\delta$` for the sake of promptness. +// +// For implementation simplicity (see below), let us use the same +// initial service time guess for every request; call that duration +// G. A good choice might be the service time limit (1 +// minute). Different guesses will give slightly different dynamics, +// but any positive number can be used for G without ruining the +// long-term behavior. +// +// As in ordinary fair queuing, there is a bound on divergence from +// the ideal. In plain fair queuing the bound is one packet; in our +// version it is C requests. +// +// To support efficiently making the necessary adjustments once a +// request’s actual service time is known, the virtual finish time of +// a request and the last virtual finish time of a queue are not +// represented directly but instead computed from queue length, +// request position in the queue, and an alternate state variable that +// holds the queue’s virtual start time. While the queue is empty and +// has no requests executing: the value of its virtual start time +// variable is ignored and its last virtual finish time is considered +// to be in the virtual past. When a request arrives to an empty queue +// with no requests executing, the queue’s virtual start time is set +// to the current virtual time. The virtual finish time of request +// number J in the queue (counting from J=1 for the head) is J * G + +// (queue's virtual start time). While the queue is non-empty: the +// last virtual finish time of the queue is the virtual finish time of +// the last request in the queue. While the queue is empty and has a +// request executing: the last virtual finish time is the queue’s +// virtual start time. When a request is dequeued for service the +// queue’s virtual start time is advanced by G. When a request +// finishes being served, and the actual service time was S, the +// queue’s virtual start time is decremented by G - S. +// +package queueset diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go new file mode 100644 index 000000000..b61d8ce7d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -0,0 +1,781 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queueset + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/util/flowcontrol/counter" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/apiserver/pkg/util/shufflesharding" + "k8s.io/klog/v2" +) + +const nsTimeFmt = "2006-01-02 15:04:05.000000000" + +// queueSetFactory implements the QueueSetFactory interface +// queueSetFactory makes QueueSet objects. +type queueSetFactory struct { + counter counter.GoRoutineCounter + clock clock.PassiveClock +} + +// `*queueSetCompleter` implements QueueSetCompleter. Exactly one of +// the fields `factory` and `theSet` is non-nil. +type queueSetCompleter struct { + factory *queueSetFactory + obsPair metrics.TimedObserverPair + theSet *queueSet + qCfg fq.QueuingConfig + dealer *shufflesharding.Dealer +} + +// queueSet implements the Fair Queuing for Server Requests technique +// described in this package's doc, and a pointer to one implements +// the QueueSet interface. The clock, GoRoutineCounter, and estimated +// service time should not be changed; the fields listed after the +// lock must be accessed only while holding the lock. The methods of +// this type follow the naming convention that the suffix "Locked" +// means the caller must hold the lock; for a method whose name does +// not end in "Locked" either acquires the lock or does not care about +// locking. +type queueSet struct { + clock clock.PassiveClock + counter counter.GoRoutineCounter + estimatedServiceTime float64 + obsPair metrics.TimedObserverPair + + lock sync.Mutex + + // qCfg holds the current queuing configuration. Its + // DesiredNumQueues may be less than the current number of queues. + // If its DesiredNumQueues is zero then its other queuing + // parameters retain the settings they had when DesiredNumQueues + // was last non-zero (if ever). + qCfg fq.QueuingConfig + + // the current dispatching configuration. + dCfg fq.DispatchingConfig + + // If `config.DesiredNumQueues` is non-zero then dealer is not nil + // and is good for `config`. + dealer *shufflesharding.Dealer + + // queues may be longer than the desired number, while the excess + // queues are still draining. + queues []*queue + + // virtualTime is the number of virtual seconds since process startup + virtualTime float64 + + // lastRealTime is what `clock.Now()` yielded when `virtualTime` was last updated + lastRealTime time.Time + + // robinIndex is the index of the last queue dispatched + robinIndex int + + // totRequestsWaiting is the sum, over all the queues, of the + // number of requests waiting in that queue + totRequestsWaiting int + + // totRequestsExecuting is the total number of requests of this + // queueSet that are currently executing. That is the same as the + // sum, over all the queues, of the number of requests executing + // from that queue. 
+ totRequestsExecuting int +} + +// NewQueueSetFactory creates a new QueueSetFactory object +func NewQueueSetFactory(c clock.PassiveClock, counter counter.GoRoutineCounter) fq.QueueSetFactory { + return &queueSetFactory{ + counter: counter, + clock: c, + } +} + +func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, obsPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { + dealer, err := checkConfig(qCfg) + if err != nil { + return nil, err + } + return &queueSetCompleter{ + factory: qsf, + obsPair: obsPair, + qCfg: qCfg, + dealer: dealer}, nil +} + +// checkConfig returns a non-nil Dealer if the config is valid and +// calls for one, and returns a non-nil error if the given config is +// invalid. +func checkConfig(qCfg fq.QueuingConfig) (*shufflesharding.Dealer, error) { + if qCfg.DesiredNumQueues == 0 { + return nil, nil + } + dealer, err := shufflesharding.NewDealer(qCfg.DesiredNumQueues, qCfg.HandSize) + if err != nil { + err = errors.Wrap(err, "the QueueSetConfig implies an invalid shuffle sharding config (DesiredNumQueues is deckSize)") + } + return dealer, err +} + +func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet { + qs := qsc.theSet + if qs == nil { + qs = &queueSet{ + clock: qsc.factory.clock, + counter: qsc.factory.counter, + estimatedServiceTime: 60, + obsPair: qsc.obsPair, + qCfg: qsc.qCfg, + virtualTime: 0, + lastRealTime: qsc.factory.clock.Now(), + } + } + qs.setConfiguration(qsc.qCfg, qsc.dealer, dCfg) + return qs +} + +// createQueues is a helper method for initializing an array of n queues +func createQueues(n, baseIndex int) []*queue { + fqqueues := make([]*queue, n) + for i := 0; i < n; i++ { + fqqueues[i] = &queue{index: baseIndex + i, requests: make([]*request, 0)} + } + return fqqueues +} + +func (qs *queueSet) BeginConfigChange(qCfg fq.QueuingConfig) (fq.QueueSetCompleter, error) { + dealer, err := checkConfig(qCfg) + if err != nil { + return nil, err + } + return &queueSetCompleter{ + theSet: qs, + qCfg: qCfg, + dealer: dealer}, nil +} + +// SetConfiguration is used to set the configuration for a queueSet. +// Update handling for when fields are updated is handled here as well - +// eg: if DesiredNum is increased, SetConfiguration reconciles by +// adding more queues. +func (qs *queueSet) setConfiguration(qCfg fq.QueuingConfig, dealer *shufflesharding.Dealer, dCfg fq.DispatchingConfig) { + qs.lockAndSyncTime() + defer qs.lock.Unlock() + + if qCfg.DesiredNumQueues > 0 { + // Adding queues is the only thing that requires immediate action + // Removing queues is handled by omitting indexes >DesiredNum from + // chooseQueueIndexLocked + numQueues := len(qs.queues) + if qCfg.DesiredNumQueues > numQueues { + qs.queues = append(qs.queues, + createQueues(qCfg.DesiredNumQueues-numQueues, len(qs.queues))...) + } + } else { + qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit + qCfg.HandSize = qs.qCfg.HandSize + qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit + } + + qs.qCfg = qCfg + qs.dCfg = dCfg + qs.dealer = dealer + qll := qCfg.QueueLengthLimit + if qll < 1 { + qll = 1 + } + qs.obsPair.RequestsWaiting.SetX1(float64(qll)) + qs.obsPair.RequestsExecuting.SetX1(float64(dCfg.ConcurrencyLimit)) + + qs.dispatchAsMuchAsPossibleLocked() +} + +// A decision about a request +type requestDecision int + +// Values passed through a request's decision +const ( + decisionExecute requestDecision = iota + decisionReject + decisionCancel +) + +// StartRequest begins the process of handling a request. 
We take the +// approach of updating the metrics about total requests queued and +// executing at each point where there is a change in that quantity, +// because the metrics --- and only the metrics --- track that +// quantity per FlowSchema. +func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) (fq.Request, bool) { + qs.lockAndSyncTime() + defer qs.lock.Unlock() + var req *request + + // ======================================================================== + // Step 0: + // Apply only concurrency limit, if zero queues desired + if qs.qCfg.DesiredNumQueues < 1 { + if qs.totRequestsExecuting >= qs.dCfg.ConcurrencyLimit { + klog.V(5).Infof("QS(%s): rejecting request %q %#+v %#+v because %d are executing and the limit is %d", qs.qCfg.Name, fsName, descr1, descr2, qs.totRequestsExecuting, qs.dCfg.ConcurrencyLimit) + metrics.AddReject(qs.qCfg.Name, fsName, "concurrency-limit") + return nil, qs.isIdleLocked() + } + req = qs.dispatchSansQueueLocked(ctx, flowDistinguisher, fsName, descr1, descr2) + return req, false + } + + // ======================================================================== + // Step 1: + // 1) Start with shuffle sharding, to pick a queue. + // 2) Reject old requests that have been waiting too long + // 3) Reject current request if there is not enough concurrency shares and + // we are at max queue length + // 4) If not rejected, create a request and enqueue + req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // req == nil means that the request was rejected - no remaining + // concurrency shares and at max queue length already + if req == nil { + klog.V(5).Infof("QS(%s): rejecting request %q %#+v %#+v due to queue full", qs.qCfg.Name, fsName, descr1, descr2) + metrics.AddReject(qs.qCfg.Name, fsName, "queue-full") + return nil, qs.isIdleLocked() + } + + // ======================================================================== + // Step 2: + // The next step is to invoke the method that dequeues as much + // as possible. + // This method runs a loop, as long as there are non-empty + // queues and the number currently executing is less than the + // assured concurrency value. The body of the loop uses the + // fair queuing technique to pick a queue and dispatch a + // request from that queue. + qs.dispatchAsMuchAsPossibleLocked() + + // ======================================================================== + // Step 3: + + // Set up a relay from the context's Done channel to the world + // of well-counted goroutines. We Are Told that every + // request's context's Done channel gets closed by the time + // the request is done being processed. + doneCh := ctx.Done() + if doneCh != nil { + qs.preCreateOrUnblockGoroutine() + go func() { + defer runtime.HandleCrash() + qs.goroutineDoneOrBlocked() + _ = <-doneCh + // Whatever goroutine unblocked the preceding receive MUST + // have already either (a) incremented qs.counter or (b) + // known that said counter is not actually counting or (c) + // known that the count does not need to be accurate. + // BTW, the count only needs to be accurate in a test that + // uses FakeEventClock::Run(). 
+ klog.V(6).Infof("QS(%s): Context of request %q %#+v %#+v is Done", qs.qCfg.Name, fsName, descr1, descr2) + qs.cancelWait(req) + qs.goroutineDoneOrBlocked() + }() + } + return req, false +} + +func (req *request) NoteQueued(inQueue bool) { + if req.queueNoteFn != nil { + req.queueNoteFn(inQueue) + } +} + +func (req *request) Finish(execFn func()) bool { + exec, idle := req.wait() + if !exec { + return idle + } + execFn() + return req.qs.finishRequestAndDispatchAsMuchAsPossible(req) +} + +func (req *request) wait() (bool, bool) { + qs := req.qs + qs.lock.Lock() + defer qs.lock.Unlock() + if req.waitStarted { + // This can not happen, because the client is forbidden to + // call Wait twice on the same request + panic(fmt.Sprintf("Multiple calls to the Wait method, QueueSet=%s, startTime=%s, descr1=%#+v, descr2=%#+v", req.qs.qCfg.Name, req.startTime, req.descr1, req.descr2)) + } + req.waitStarted = true + + // ======================================================================== + // Step 4: + // The final step is to wait on a decision from + // somewhere and then act on it. + decisionAny := req.decision.GetLocked() + qs.syncTimeLocked() + decision, isDecision := decisionAny.(requestDecision) + if !isDecision { + panic(fmt.Sprintf("QS(%s): Impossible decision %#+v (of type %T) for request %#+v %#+v", qs.qCfg.Name, decisionAny, decisionAny, req.descr1, req.descr2)) + } + switch decision { + case decisionReject: + klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) + metrics.AddReject(qs.qCfg.Name, req.fsName, "time-out") + return false, qs.isIdleLocked() + case decisionCancel: + // TODO(aaron-prindle) add metrics for this case + klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) + return false, qs.isIdleLocked() + case decisionExecute: + klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) + return true, false + default: + // This can not happen, all possible values are handled above + panic(decision) + } +} + +func (qs *queueSet) IsIdle() bool { + qs.lock.Lock() + defer qs.lock.Unlock() + return qs.isIdleLocked() +} + +func (qs *queueSet) isIdleLocked() bool { + return qs.totRequestsWaiting == 0 && qs.totRequestsExecuting == 0 +} + +// lockAndSyncTime acquires the lock and updates the virtual time. +// Doing them together avoids the mistake of modify some queue state +// before calling syncTimeLocked. +func (qs *queueSet) lockAndSyncTime() { + qs.lock.Lock() + qs.syncTimeLocked() +} + +// syncTimeLocked updates the virtual time based on the assumption +// that the current state of the queues has been in effect since +// `qs.lastRealTime`. Thus, it should be invoked after acquiring the +// lock and before modifying the state of any queue. +func (qs *queueSet) syncTimeLocked() { + realNow := qs.clock.Now() + timeSinceLast := realNow.Sub(qs.lastRealTime).Seconds() + qs.lastRealTime = realNow + qs.virtualTime += timeSinceLast * qs.getVirtualTimeRatioLocked() +} + +// getVirtualTimeRatio calculates the rate at which virtual time has +// been advancing, according to the logic in `doc.go`. 
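+// Concretely, it returns min(total requests executing, concurrency
+// limit) divided by the number of queues that currently have requests
+// waiting or executing, and 0 when there is no such queue.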
+func (qs *queueSet) getVirtualTimeRatioLocked() float64 {
+	activeQueues := 0
+	reqs := 0
+	for _, queue := range qs.queues {
+		reqs += queue.requestsExecuting
+		if len(queue.requests) > 0 || queue.requestsExecuting > 0 {
+			activeQueues++
+		}
+	}
+	if activeQueues == 0 {
+		return 0
+	}
+	return math.Min(float64(reqs), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues)
+}
+
+// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required
+// to validate and enqueue a request for the queueSet/QueueSet:
+// 1) Start with shuffle sharding, to pick a queue.
+// 2) Reject old requests that have been waiting too long
+// 3) Reject current request if there are not enough concurrency shares and
+//    we are at max queue length
+// 4) If not rejected, create a request and enqueue
+// returns the enqueued request on a successful enqueue
+// returns nil in the case that there is no available concurrency or
+// the queue length limit has been reached
+func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
+	// Start with the shuffle sharding, to pick a queue.
+	queueIdx := qs.chooseQueueIndexLocked(hashValue, descr1, descr2)
+	queue := qs.queues[queueIdx]
+	// The next step is the logic to reject requests that have been waiting too long
+	qs.removeTimedOutRequestsFromQueueLocked(queue, fsName)
+	// NOTE: currently timeout is only checked for each new request. This means that there can be
+	// requests that are in the queue longer than the timeout if there are no new requests.
+	// We prefer the simplicity over the promptness, at least for now.
+
+	// Create a request and enqueue
+	req := &request{
+		qs:                qs,
+		fsName:            fsName,
+		flowDistinguisher: flowDistinguisher,
+		ctx:               ctx,
+		decision:          lockingpromise.NewWriteOnce(&qs.lock, qs.counter),
+		arrivalTime:       qs.clock.Now(),
+		queue:             queue,
+		descr1:            descr1,
+		descr2:            descr2,
+		queueNoteFn:       queueNoteFn,
+	}
+	if ok := qs.rejectOrEnqueueLocked(req); !ok {
+		return nil
+	}
+	metrics.ObserveQueueLength(qs.qCfg.Name, fsName, len(queue.requests))
+	return req
+}
+
+// chooseQueueIndexLocked uses shuffle sharding to select a queue index
+// using the given hashValue and the shuffle sharding parameters of the queueSet.
+func (qs *queueSet) chooseQueueIndexLocked(hashValue uint64, descr1, descr2 interface{}) int {
+	bestQueueIdx := -1
+	bestQueueLen := int(math.MaxInt32)
+	// the dealer uses the current desired number of queues, which is no larger than the number in `qs.queues`.
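+	// Deal calls the function below once for each queue index in this
+	// request's shuffle-sharding hand; among those candidates we keep
+	// the queue with the fewest waiting requests.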
+ qs.dealer.Deal(hashValue, func(queueIdx int) { + thisLen := len(qs.queues[queueIdx].requests) + klog.V(7).Infof("QS(%s): For request %#+v %#+v considering queue %d of length %d", qs.qCfg.Name, descr1, descr2, queueIdx, thisLen) + if thisLen < bestQueueLen { + bestQueueIdx, bestQueueLen = queueIdx, thisLen + } + }) + klog.V(6).Infof("QS(%s) at r=%s v=%.9fs: For request %#+v %#+v chose queue %d, had %d waiting & %d executing", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.virtualTime, descr1, descr2, bestQueueIdx, bestQueueLen, qs.queues[bestQueueIdx].requestsExecuting) + return bestQueueIdx +} + +// removeTimedOutRequestsFromQueueLocked rejects old requests that have been enqueued +// past the requestWaitLimit +func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName string) { + timeoutIdx := -1 + now := qs.clock.Now() + reqs := queue.requests + // reqs are sorted oldest -> newest + // can short circuit loop (break) if oldest requests are not timing out + // as newer requests also will not have timed out + + // now - requestWaitLimit = waitLimit + waitLimit := now.Add(-qs.qCfg.RequestWaitLimit) + for i, req := range reqs { + if waitLimit.After(req.arrivalTime) { + req.decision.SetLocked(decisionReject) + // get index for timed out requests + timeoutIdx = i + metrics.AddRequestsInQueues(qs.qCfg.Name, req.fsName, -1) + req.NoteQueued(false) + } else { + break + } + } + // remove timed out requests from queue + if timeoutIdx != -1 { + // timeoutIdx + 1 to remove the last timeout req + removeIdx := timeoutIdx + 1 + // remove all the timeout requests + queue.requests = reqs[removeIdx:] + // decrement the # of requestsEnqueued + qs.totRequestsWaiting -= removeIdx + qs.obsPair.RequestsWaiting.Add(float64(-removeIdx)) + } +} + +// rejectOrEnqueueLocked rejects or enqueues the newly arrived +// request, which has been assigned to a queue. If up against the +// queue length limit and the concurrency limit then returns false. +// Otherwise enqueues and returns true. +func (qs *queueSet) rejectOrEnqueueLocked(request *request) bool { + queue := request.queue + curQueueLength := len(queue.requests) + // rejects the newly arrived request if resource criteria not met + if qs.totRequestsExecuting >= qs.dCfg.ConcurrencyLimit && + curQueueLength >= qs.qCfg.QueueLengthLimit { + return false + } + + qs.enqueueLocked(request) + return true +} + +// enqueues a request into its queue. +func (qs *queueSet) enqueueLocked(request *request) { + queue := request.queue + now := qs.clock.Now() + if len(queue.requests) == 0 && queue.requestsExecuting == 0 { + // the queue’s virtual start time is set to the virtual time. + queue.virtualStart = qs.virtualTime + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: initialized queue %d virtual start time due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.virtualStart, queue.index, request.descr1, request.descr2) + } + } + queue.Enqueue(request) + qs.totRequestsWaiting++ + metrics.AddRequestsInQueues(qs.qCfg.Name, request.fsName, 1) + request.NoteQueued(true) + qs.obsPair.RequestsWaiting.Add(1) +} + +// dispatchAsMuchAsPossibleLocked runs a loop, as long as there +// are non-empty queues and the number currently executing is less than the +// assured concurrency value. The body of the loop uses the fair queuing +// technique to pick a queue, dequeue the request at the head of that +// queue, increment the count of the number executing, and send true +// to the request's channel. 
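+// (In this implementation the "channel" is the request's decision
+// promise, which dispatchLocked sets to decisionExecute.)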
+func (qs *queueSet) dispatchAsMuchAsPossibleLocked() { + for qs.totRequestsWaiting != 0 && qs.totRequestsExecuting < qs.dCfg.ConcurrencyLimit { + ok := qs.dispatchLocked() + if !ok { + break + } + } +} + +func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { + now := qs.clock.Now() + req := &request{ + qs: qs, + fsName: fsName, + flowDistinguisher: flowDistinguisher, + ctx: ctx, + startTime: now, + decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + arrivalTime: now, + descr1: descr1, + descr2: descr2, + } + req.decision.SetLocked(decisionExecute) + qs.totRequestsExecuting++ + metrics.AddRequestsExecuting(qs.qCfg.Name, fsName, 1) + qs.obsPair.RequestsExecuting.Add(1) + if klog.V(5).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, fsName, descr1, descr2, qs.totRequestsExecuting) + } + return req +} + +// dispatchLocked uses the Fair Queuing for Server Requests method to +// select a queue and dispatch the oldest request in that queue. The +// return value indicates whether a request was dispatched; this will +// be false when there are no requests waiting in any queue. +func (qs *queueSet) dispatchLocked() bool { + queue := qs.selectQueueLocked() + if queue == nil { + return false + } + request, ok := queue.Dequeue() + if !ok { // This should never happen. But if it does... + return false + } + request.startTime = qs.clock.Now() + // At this moment the request leaves its queue and starts + // executing. We do not recognize any interim state between + // "queued" and "executing". While that means "executing" + // includes a little overhead from this package, this is not a + // problem because other overhead is also included. + qs.totRequestsWaiting-- + qs.totRequestsExecuting++ + queue.requestsExecuting++ + metrics.AddRequestsInQueues(qs.qCfg.Name, request.fsName, -1) + request.NoteQueued(false) + metrics.AddRequestsExecuting(qs.qCfg.Name, request.fsName, 1) + qs.obsPair.RequestsWaiting.Add(-1) + qs.obsPair.RequestsExecuting.Add(1) + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: dispatching request %#+v %#+v from queue %d with virtual start time %.9fs, queue will have %d waiting & %d executing", qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.virtualTime, request.descr1, request.descr2, queue.index, queue.virtualStart, len(queue.requests), queue.requestsExecuting) + } + // When a request is dequeued for service -> qs.virtualStart += G + queue.virtualStart += qs.estimatedServiceTime + request.decision.SetLocked(decisionExecute) + return ok +} + +// cancelWait ensures the request is not waiting. This is only +// applicable to a request that has been assigned to a queue. +func (qs *queueSet) cancelWait(req *request) { + qs.lock.Lock() + defer qs.lock.Unlock() + if req.decision.IsSetLocked() { + // The request has already been removed from the queue + // and so we consider its wait to be over. + return + } + req.decision.SetLocked(decisionCancel) + queue := req.queue + // remove the request from the queue as it has timed out + for i := range queue.requests { + if req == queue.requests[i] { + // remove the request + queue.requests = append(queue.requests[:i], queue.requests[i+1:]...) 
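+			// Fix up the waiting-request accounting now that the request has left its queue.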
+			qs.totRequestsWaiting--
+			metrics.AddRequestsInQueues(qs.qCfg.Name, req.fsName, -1)
+			req.NoteQueued(false)
+			qs.obsPair.RequestsWaiting.Add(-1)
+			break
+		}
+	}
+	return
+}
+
+// selectQueueLocked examines the queues in round robin order and
+// returns the first one of those for which the virtual finish time of
+// the oldest waiting request is minimal.
+func (qs *queueSet) selectQueueLocked() *queue {
+	minVirtualFinish := math.Inf(1)
+	var minQueue *queue
+	var minIndex int
+	nq := len(qs.queues)
+	for range qs.queues {
+		qs.robinIndex = (qs.robinIndex + 1) % nq
+		queue := qs.queues[qs.robinIndex]
+		if len(queue.requests) != 0 {
+
+			currentVirtualFinish := queue.GetVirtualFinish(0, qs.estimatedServiceTime)
+			if currentVirtualFinish < minVirtualFinish {
+				minVirtualFinish = currentVirtualFinish
+				minQueue = queue
+				minIndex = qs.robinIndex
+			}
+		}
+	}
+	// we set the round robin indexing to start at the chosen queue
+	// for the next round. This way the non-selected queues
+	// win in the case that the virtual finish times are the same.
+	qs.robinIndex = minIndex
+	// according to the original FQ formula:
+	//
+	//	Si = MAX(R(t), Fi-1)
+	//
+	// the virtual start (excluding the estimated cost) of the chosen
+	// queue should always be greater than or equal to the global virtual
+	// time.
+	//
+	// hence we're refreshing the per-queue virtual time for the chosen
+	// queue here. if the last virtual start time (excluding estimated cost)
+	// falls behind the global virtual time, we update the latest virtual
+	// start by:
+
+	previouslyEstimatedServiceTime := float64(minQueue.requestsExecuting) * qs.estimatedServiceTime
+	if qs.virtualTime > minQueue.virtualStart-previouslyEstimatedServiceTime {
+		// per-queue virtual time should not fall behind the global
+		minQueue.virtualStart = qs.virtualTime + previouslyEstimatedServiceTime
+	}
+	return minQueue
+}
+
+// finishRequestAndDispatchAsMuchAsPossible is a convenience method
+// which calls finishRequest for a given request and then dispatches
+// as many requests as possible. This is all of what needs to be done
+// once a request finishes execution or is canceled. This returns a bool
+// indicating whether the QueueSet is now idle.
+func (qs *queueSet) finishRequestAndDispatchAsMuchAsPossible(req *request) bool {
+	qs.lockAndSyncTime()
+	defer qs.lock.Unlock()
+
+	qs.finishRequestLocked(req)
+	qs.dispatchAsMuchAsPossibleLocked()
+	return qs.isIdleLocked()
+}
+
+// finishRequestLocked is a callback that should be used when a
+// previously dispatched request has completed its service. This
+// callback updates important state in the queueSet.
+func (qs *queueSet) finishRequestLocked(r *request) {
+	now := qs.clock.Now()
+	qs.totRequestsExecuting--
+	metrics.AddRequestsExecuting(qs.qCfg.Name, r.fsName, -1)
+	qs.obsPair.RequestsExecuting.Add(-1)
+
+	if r.queue == nil {
+		if klog.V(6).Enabled() {
+			klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, qs.totRequestsExecuting)
+		}
+		return
+	}
+
+	S := now.Sub(r.startTime).Seconds()
+
+	// When a request finishes being served, and the actual service time was S,
+	// the queue’s virtual start time is decremented by G - S.
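+	// Together with the advance by G when the request was dispatched,
+	// the net effect is that the queue's virtual start time moves
+	// forward by exactly the actual service time S.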
+ r.queue.virtualStart -= qs.estimatedServiceTime - S + + // request has finished, remove from requests executing + r.queue.requestsExecuting-- + + if klog.V(6).Enabled() { + klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, adjusted queue %d virtual start time to %.9fs due to service time %.9fs, queue will have %d waiting & %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, r.queue.index, r.queue.virtualStart, S, len(r.queue.requests), r.queue.requestsExecuting) + } + + // If there are more queues than desired and this one has no + // requests then remove it + if len(qs.queues) > qs.qCfg.DesiredNumQueues && + len(r.queue.requests) == 0 && + r.queue.requestsExecuting == 0 { + qs.queues = removeQueueAndUpdateIndexes(qs.queues, r.queue.index) + + // decrement here to maintain the invariant that (qs.robinIndex+1) % numQueues + // is the index of the next queue after the one last dispatched from + if qs.robinIndex >= r.queue.index { + qs.robinIndex-- + } + } +} + +// removeQueueAndUpdateIndexes uses reslicing to remove an index from a slice +// and then updates the 'index' field of the queues to be correct +func removeQueueAndUpdateIndexes(queues []*queue, index int) []*queue { + keptQueues := append(queues[:index], queues[index+1:]...) + for i := index; i < len(keptQueues); i++ { + keptQueues[i].index-- + } + return keptQueues +} + +// preCreateOrUnblockGoroutine needs to be called before creating a +// goroutine associated with this queueSet or unblocking a blocked +// one, to properly update the accounting used in testing. +func (qs *queueSet) preCreateOrUnblockGoroutine() { + qs.counter.Add(1) +} + +// goroutineDoneOrBlocked needs to be called at the end of every +// goroutine associated with this queueSet or when such a goroutine is +// about to wait on some other goroutine to do something; this is to +// properly update the accounting used in testing. +func (qs *queueSet) goroutineDoneOrBlocked() { + qs.counter.Add(-1) +} + +func (qs *queueSet) UpdateObservations() { + qs.obsPair.RequestsWaiting.Add(0) + qs.obsPair.RequestsExecuting.Add(0) +} + +func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump { + qs.lock.Lock() + defer qs.lock.Unlock() + d := debug.QueueSetDump{ + Queues: make([]debug.QueueDump, len(qs.queues)), + Waiting: qs.totRequestsWaiting, + Executing: qs.totRequestsExecuting, + } + for i, q := range qs.queues { + d.Queues[i] = q.dump(includeRequestDetails) + } + return d +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go new file mode 100644 index 000000000..a72023060 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queueset + +import ( + "context" + "time" + + genericrequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" + fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" +) + +// request is a temporary container for "requests" with additional +// tracking fields required for the functionality FQScheduler +type request struct { + ctx context.Context + + qs *queueSet + + flowDistinguisher string + fsName string + + // The relevant queue. Is nil if this request did not go through + // a queue. + queue *queue + + // startTime is the real time when the request began executing + startTime time.Time + + // decision gets set to a `requestDecision` indicating what to do + // with this request. It gets set exactly once, when the request + // is removed from its queue. The value will be decisionReject, + // decisionCancel, or decisionExecute; decisionTryAnother never + // appears here. + decision promise.LockingWriteOnce + + // arrivalTime is the real time when the request entered this system + arrivalTime time.Time + + // descr1 and descr2 are not used in any logic but they appear in + // log messages + descr1, descr2 interface{} + + // Indicates whether client has called Request::Wait() + waitStarted bool + + queueNoteFn fq.QueueNoteFn +} + +// queue is an array of requests with additional metadata required for +// the FQScheduler +type queue struct { + requests []*request + + // virtualStart is the virtual time (virtual seconds since process + // startup) when the oldest request in the queue (if there is any) + // started virtually executing + virtualStart float64 + + requestsExecuting int + index int +} + +// Enqueue enqueues a request into the queue +func (q *queue) Enqueue(request *request) { + q.requests = append(q.requests, request) +} + +// Dequeue dequeues a request from the queue +func (q *queue) Dequeue() (*request, bool) { + if len(q.requests) == 0 { + return nil, false + } + request := q.requests[0] + q.requests = q.requests[1:] + return request, true +} + +// GetVirtualFinish returns the expected virtual finish time of the request at +// index J in the queue with estimated finish time G +func (q *queue) GetVirtualFinish(J int, G float64) float64 { + // The virtual finish time of request number J in the queue + // (counting from J=1 for the head) is J * G + (virtual start time). + + // counting from J=1 for the head (eg: queue.requests[0] -> J=1) - J+1 + jg := float64(J+1) * float64(G) + return jg + q.virtualStart +} + +func (q *queue) dump(includeDetails bool) debug.QueueDump { + digest := make([]debug.RequestDump, len(q.requests)) + for i, r := range q.requests { + // dump requests. 
+ digest[i].MatchedFlowSchema = r.fsName + digest[i].FlowDistinguisher = r.flowDistinguisher + digest[i].ArriveTime = r.arrivalTime + digest[i].StartTime = r.startTime + if includeDetails { + userInfo, _ := genericrequest.UserFrom(r.ctx) + digest[i].UserName = userInfo.GetName() + requestInfo, ok := genericrequest.RequestInfoFrom(r.ctx) + if ok { + digest[i].RequestInfo = *requestInfo + } + } + } + return debug.QueueDump{ + VirtualStart: q.virtualStart, + Requests: digest, + ExecutingRequests: q.requestsExecuting, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go new file mode 100644 index 000000000..61ae65df9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go @@ -0,0 +1,231 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package format + +import ( + "bytes" + "encoding/json" + "fmt" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// This file provides an easy way to mark a value for formatting to +// `%s` in full detail IF it is printed but without costing a lot of +// CPU or memory if the value is NOT printed. The API Priority and +// Fairness API objects are formatted into JSON. The other types of +// objects here are formatted into golang source. + +// Stringer marks the given value for custom formatting by this package. +type Stringer struct{ val interface{} } + +// Fmt marks the given value for custom formatting by this package. 
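+// A purely illustrative use (fs here stands for some hypothetical
+// *flowcontrol.FlowSchema value) is
+//
+//	klog.V(5).Infof("Considering FlowSchema %s", format.Fmt(fs))
+//
+// where the JSON marshaling done by String() is only paid for when
+// the message is actually emitted.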
+func Fmt(val interface{}) Stringer { + return Stringer{val} +} + +// String formats to a string in full detail +func (sr Stringer) String() string { + if sr.val == nil { + return "nil" + } + switch typed := sr.val.(type) { + case *flowcontrol.FlowSchema, + flowcontrol.FlowSchema, + flowcontrol.FlowSchemaSpec, + flowcontrol.FlowDistinguisherMethod, + *flowcontrol.FlowDistinguisherMethod, + *flowcontrol.PolicyRulesWithSubjects, + flowcontrol.PolicyRulesWithSubjects, + flowcontrol.Subject, + flowcontrol.ResourcePolicyRule, + flowcontrol.NonResourcePolicyRule, + flowcontrol.FlowSchemaCondition, + *flowcontrol.PriorityLevelConfiguration, + flowcontrol.PriorityLevelConfiguration, + flowcontrol.PriorityLevelConfigurationSpec, + *flowcontrol.LimitedPriorityLevelConfiguration, + flowcontrol.LimitedPriorityLevelConfiguration, + flowcontrol.LimitResponse, + *flowcontrol.QueuingConfiguration, + flowcontrol.QueuingConfiguration: + return ToJSON(sr.val) + case []user.Info: + return FmtUsers(typed) + case []*request.RequestInfo: + return FmtRequests(typed) + default: + return fmt.Sprintf("%#+v", sr.val) + } +} + +// ToJSON converts using encoding/json and handles errors by +// formatting them +func ToJSON(val interface{}) string { + bs, err := json.Marshal(val) + str := string(bs) + if err != nil { + str = str + "<" + err.Error() + ">" + } + return str +} + +// FmtPriorityLevelConfiguration returns a golang source expression +// equivalent to the given value +func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) string { + if pl == nil { + return "nil" + } + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", + pl.ObjectMeta)) + BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec) + buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status)) + return buf.String() +} + +// FmtPriorityLevelConfigurationSpec returns a golang source +// expression equivalent to the given value +func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigurationSpec) string { + var buf bytes.Buffer + BufferPriorityLevelConfigurationSpec(&buf, plSpec) + return buf.String() +} + +// BufferPriorityLevelConfigurationSpec writes a golang source +// expression for the given value to the given buffer +func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) { + buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) + if plSpec.Limited != nil { + buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{AssuredConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.AssuredConcurrencyShares, plSpec.Limited.LimitResponse.Type)) + if plSpec.Limited.LimitResponse.Queuing != nil { + buf.WriteString(fmt.Sprintf(", Queuing:&%#+v", *plSpec.Limited.LimitResponse.Queuing)) + } + buf.WriteString(" } }") + } + buf.WriteString("}") +} + +// FmtFlowSchema produces a golang source expression of the value. 
+func FmtFlowSchema(fs *flowcontrol.FlowSchema) string { + if fs == nil { + return "nil" + } + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.FlowSchema{ObjectMeta: %#+v, Spec: ", + fs.ObjectMeta)) + BufferFlowSchemaSpec(&buf, &fs.Spec) + buf.WriteString(fmt.Sprintf(", Status: %#+v}", fs.Status)) + return buf.String() +} + +// FmtFlowSchemaSpec produces a golang source expression equivalent to +// the given spec +func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string { + var buf bytes.Buffer + BufferFlowSchemaSpec(&buf, fsSpec) + return buf.String() +} + +// BufferFlowSchemaSpec writes a golang source expression for the +// given value to the given buffer +func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) { + buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", + fsSpec.PriorityLevelConfiguration, + fsSpec.MatchingPrecedence)) + if fsSpec.DistinguisherMethod == nil { + buf.WriteString("nil") + } else { + buf.WriteString(fmt.Sprintf("&%#+v", *fsSpec.DistinguisherMethod)) + } + buf.WriteString(", Rules: []flowcontrol.PolicyRulesWithSubjects{") + for idx, rule := range fsSpec.Rules { + if idx > 0 { + buf.WriteString(", ") + } + BufferFmtPolicyRulesWithSubjectsSlim(buf, rule) + } + buf.WriteString("}}") +} + +// FmtPolicyRulesWithSubjects produces a golang source expression of the value. +func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string { + return "flowcontrolv1beta1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) +} + +// FmtPolicyRulesWithSubjectsSlim produces a golang source expression +// of the value but without the leading type name. See above for an +// example context where this is useful. +func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) string { + var buf bytes.Buffer + BufferFmtPolicyRulesWithSubjectsSlim(&buf, rule) + return buf.String() +} + +// BufferFmtPolicyRulesWithSubjectsSlim writes a golang source +// expression for the given value to the given buffer but excludes the +// leading type name +func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) { + buf.WriteString("{Subjects: []flowcontrolv1beta1.Subject{") + for jdx, subj := range rule.Subjects { + if jdx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("{Kind: %q", subj.Kind)) + if subj.User != nil { + buf.WriteString(fmt.Sprintf(", User: &%#+v", *subj.User)) + } + if subj.Group != nil { + buf.WriteString(fmt.Sprintf(", Group: &%#+v", *subj.Group)) + } + if subj.ServiceAccount != nil { + buf.WriteString(fmt.Sprintf(", ServiceAcount: &%#+v", *subj.ServiceAccount)) + } + buf.WriteString("}") + } + buf.WriteString(fmt.Sprintf("}, ResourceRules: %#+v, NonResourceRules: %#+v}", rule.ResourceRules, rule.NonResourceRules)) +} + +// FmtUsers produces a golang source expression of the value. +func FmtUsers(list []user.Info) string { + var buf bytes.Buffer + buf.WriteString("[]user.Info{") + for idx, member := range list { + if idx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("%#+v", member)) + } + buf.WriteString("}") + return buf.String() +} + +// FmtRequests produces a golang source expression of the value. 
+func FmtRequests(list []*request.RequestInfo) string { + var buf bytes.Buffer + buf.WriteString("[]*request.RequestInfo{") + for idx, member := range list { + if idx > 0 { + buf.WriteString(", ") + } + buf.WriteString(fmt.Sprintf("%#+v", member)) + } + buf.WriteString("}") + return buf.String() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go new file mode 100644 index 000000000..5b5b367bd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/formatting.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "fmt" + + fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format" +) + +var _ fmt.GoStringer = RequestDigest{} + +// GoString produces a golang source expression of the value. +func (rd RequestDigest) GoString() string { + return fmt.Sprintf("RequestDigest{RequestInfo: %#+v, User: %#+v}", rd.RequestInfo, rd.User) +} + +var _ fmt.GoStringer = (*priorityLevelState)(nil) + +// GoString produces a golang source expression of the value. +func (pls *priorityLevelState) GoString() string { + if pls == nil { + return "nil" + } + return fmt.Sprintf("&priorityLevelState{pl:%s, qsCompleter:%#+v, queues:%#+v, quiescing:%#v, numPending:%d}", fcfmt.Fmt(pls.pl), pls.qsCompleter, pls.queues, pls.quiescing, pls.numPending) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go new file mode 100644 index 000000000..bdbaa9460 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -0,0 +1,261 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "strings" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + basemetricstestutil "k8s.io/component-base/metrics/testutil" +) + +const ( + namespace = "apiserver" + subsystem = "flowcontrol" +) + +const ( + requestKind = "request_kind" + priorityLevel = "priority_level" + flowSchema = "flow_schema" + phase = "phase" + mark = "mark" +) + +var ( + queueLengthBuckets = []float64{0, 10, 25, 50, 100, 250, 500, 1000} + requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30} +) + +var registerMetrics sync.Once + +// Register all metrics. 
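+// Register is idempotent: only the first call actually registers the
+// metrics with the legacy registry, guarded by a sync.Once.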
+func Register() { + registerMetrics.Do(func() { + for _, metric := range metrics { + legacyregistry.MustRegister(metric) + } + }) +} + +type resettable interface { + Reset() +} + +// Reset all metrics to zero +func Reset() { + for _, metric := range metrics { + rm := metric.(resettable) + rm.Reset() + } +} + +// GatherAndCompare the given metrics with the given Prometheus syntax expected value +func GatherAndCompare(expected string, metricNames ...string) error { + return basemetricstestutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expected), metricNames...) +} + +// Registerables is a slice of Registerable +type Registerables []compbasemetrics.Registerable + +// Append adds more +func (rs Registerables) Append(more ...compbasemetrics.Registerable) Registerables { + return append(rs, more...) +} + +var ( + apiserverRejectedRequestsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "rejected_requests_total", + Help: "Number of requests rejected by API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema, "reason"}, + ) + apiserverDispatchedRequestsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dispatched_requests_total", + Help: "Number of requests released by API Priority and Fairness system for service", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + + // PriorityLevelConcurrencyObserverPairGenerator creates pairs that observe concurrency for priority levels + PriorityLevelConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_request_count_samples", + Help: "Periodic observations of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_request_count_watermarks", + Help: "Watermarks of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel}) + + // ReadWriteConcurrencyObserverPairGenerator creates pairs that observe concurrency broken down by mutating vs readonly + ReadWriteConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_vs_write_request_count_samples", + Help: "Periodic observations of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_vs_write_request_count_watermarks", + Help: "Watermarks of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{requestKind}) + + apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_inqueue_requests", + Help: "Number of requests currently pending in 
queues of the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestQueueLength = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_queue_length_after_enqueue", + Help: "Length of queue in the API Priority and Fairness system, as seen by each request after it is enqueued", + Buckets: queueLengthBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestConcurrencyLimit = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_concurrency_limit", + Help: "Shared concurrency limit in the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel}, + ) + apiserverCurrentExecutingRequests = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_executing_requests", + Help: "Number of requests currently executing in the API Priority and Fairness system", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverRequestWaitingSeconds = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_wait_duration_seconds", + Help: "Length of time a request spent waiting in its queue", + Buckets: requestDurationSecondsBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema, "execute"}, + ) + apiserverRequestExecutionSeconds = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_execution_seconds", + Help: "Duration of request execution in the API Priority and Fairness system", + Buckets: requestDurationSecondsBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + metrics = Registerables{ + apiserverRejectedRequestsTotal, + apiserverDispatchedRequestsTotal, + apiserverCurrentInqueueRequests, + apiserverRequestQueueLength, + apiserverRequestConcurrencyLimit, + apiserverCurrentExecutingRequests, + apiserverRequestWaitingSeconds, + apiserverRequestExecutionSeconds, + }. + Append(PriorityLevelConcurrencyObserverPairGenerator.metrics()...). + Append(ReadWriteConcurrencyObserverPairGenerator.metrics()...) 
+) + +// AddRequestsInQueues adds the given delta to the gauge of the # of requests in the queues of the specified flowSchema and priorityLevel +func AddRequestsInQueues(priorityLevel, flowSchema string, delta int) { + apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + +// AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel +func AddRequestsExecuting(priorityLevel, flowSchema string, delta int) { + apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + +// UpdateSharedConcurrencyLimit updates the value for the concurrency limit in flow control +func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) { + apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel).Set(float64(limit)) +} + +// AddReject increments the # of rejected requests for flow control +func AddReject(priorityLevel, flowSchema, reason string) { + apiserverRejectedRequestsTotal.WithLabelValues(priorityLevel, flowSchema, reason).Add(1) +} + +// AddDispatch increments the # of dispatched requests for flow control +func AddDispatch(priorityLevel, flowSchema string) { + apiserverDispatchedRequestsTotal.WithLabelValues(priorityLevel, flowSchema).Add(1) +} + +// ObserveQueueLength observes the queue length for flow control +func ObserveQueueLength(priorityLevel, flowSchema string, length int) { + apiserverRequestQueueLength.WithLabelValues(priorityLevel, flowSchema).Observe(float64(length)) +} + +// ObserveWaitingDuration observes the queue length for flow control +func ObserveWaitingDuration(priorityLevel, flowSchema, execute string, waitTime time.Duration) { + apiserverRequestWaitingSeconds.WithLabelValues(priorityLevel, flowSchema, execute).Observe(waitTime.Seconds()) +} + +// ObserveExecutionDuration observes the execution duration for flow control +func ObserveExecutionDuration(priorityLevel, flowSchema string, executionTime time.Duration) { + apiserverRequestExecutionSeconds.WithLabelValues(priorityLevel, flowSchema).Observe(executionTime.Seconds()) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go new file mode 100644 index 000000000..aade70093 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go @@ -0,0 +1,211 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/klog/v2" +) + +const ( + labelNameMark = "mark" + labelValueLo = "low" + labelValueHi = "high" + labelNamePhase = "phase" + labelValueWaiting = "waiting" + labelValueExecuting = "executing" +) + +// SampleAndWaterMarkPairGenerator makes pairs of TimedObservers that +// track samples and watermarks. 
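+// Each generated pair holds one observer for waiting requests and one
+// for executing requests, distinguished by the "phase" label.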
+type SampleAndWaterMarkPairGenerator struct { + urGenerator SampleAndWaterMarkObserverGenerator +} + +var _ TimedObserverPairGenerator = SampleAndWaterMarkPairGenerator{} + +// NewSampleAndWaterMarkHistogramsPairGenerator makes a new pair generator +func NewSampleAndWaterMarkHistogramsPairGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkPairGenerator { + return SampleAndWaterMarkPairGenerator{ + urGenerator: NewSampleAndWaterMarkHistogramsGenerator(clock, samplePeriod, sampleOpts, waterMarkOpts, append([]string{labelNamePhase}, labelNames...)), + } +} + +// Generate makes a new pair +func (spg SampleAndWaterMarkPairGenerator) Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair { + return TimedObserverPair{ + RequestsWaiting: spg.urGenerator.Generate(0, waiting1, append([]string{labelValueWaiting}, labelValues...)), + RequestsExecuting: spg.urGenerator.Generate(0, executing1, append([]string{labelValueExecuting}, labelValues...)), + } +} + +func (spg SampleAndWaterMarkPairGenerator) metrics() Registerables { + return spg.urGenerator.metrics() +} + +// SampleAndWaterMarkObserverGenerator creates TimedObservers that +// populate histograms of samples and low- and high-water-marks. The +// generator has a samplePeriod, and the histograms get an observation +// every samplePeriod. The sampling windows are quantized based on +// the monotonic rather than wall-clock times. The `t0` field is +// there so to provide a baseline for monotonic clock differences. +type SampleAndWaterMarkObserverGenerator struct { + *sampleAndWaterMarkObserverGenerator +} + +type sampleAndWaterMarkObserverGenerator struct { + clock clock.PassiveClock + t0 time.Time + samplePeriod time.Duration + samples *compbasemetrics.HistogramVec + waterMarks *compbasemetrics.HistogramVec +} + +var _ TimedObserverGenerator = (*sampleAndWaterMarkObserverGenerator)(nil) + +// NewSampleAndWaterMarkHistogramsGenerator makes a new one +func NewSampleAndWaterMarkHistogramsGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkObserverGenerator { + return SampleAndWaterMarkObserverGenerator{ + &sampleAndWaterMarkObserverGenerator{ + clock: clock, + t0: clock.Now(), + samplePeriod: samplePeriod, + samples: compbasemetrics.NewHistogramVec(sampleOpts, labelNames), + waterMarks: compbasemetrics.NewHistogramVec(waterMarkOpts, append([]string{labelNameMark}, labelNames...)), + }} +} + +func (swg *sampleAndWaterMarkObserverGenerator) quantize(when time.Time) int64 { + return int64(when.Sub(swg.t0) / swg.samplePeriod) +} + +// Generate makes a new TimedObserver +func (swg *sampleAndWaterMarkObserverGenerator) Generate(x, x1 float64, labelValues []string) TimedObserver { + relX := x / x1 + when := swg.clock.Now() + return &sampleAndWaterMarkHistograms{ + sampleAndWaterMarkObserverGenerator: swg, + labelValues: labelValues, + loLabelValues: append([]string{labelValueLo}, labelValues...), + hiLabelValues: append([]string{labelValueHi}, labelValues...), + x1: x1, + sampleAndWaterMarkAccumulator: sampleAndWaterMarkAccumulator{ + lastSet: when, + lastSetInt: swg.quantize(when), + x: x, + relX: relX, + loRelX: relX, + hiRelX: relX, + }} +} + +func (swg *sampleAndWaterMarkObserverGenerator) metrics() Registerables { + return Registerables{swg.samples, swg.waterMarks} +} + +type sampleAndWaterMarkHistograms struct { 
+ *sampleAndWaterMarkObserverGenerator + labelValues []string + loLabelValues, hiLabelValues []string + + sync.Mutex + x1 float64 + sampleAndWaterMarkAccumulator +} + +type sampleAndWaterMarkAccumulator struct { + lastSet time.Time + lastSetInt int64 // lastSet / samplePeriod + x float64 + relX float64 // x / x1 + loRelX, hiRelX float64 +} + +var _ TimedObserver = (*sampleAndWaterMarkHistograms)(nil) + +func (saw *sampleAndWaterMarkHistograms) Add(deltaX float64) { + saw.innerSet(func() { + saw.x += deltaX + }) +} + +func (saw *sampleAndWaterMarkHistograms) Set(x float64) { + saw.innerSet(func() { + saw.x = x + }) +} + +func (saw *sampleAndWaterMarkHistograms) SetX1(x1 float64) { + saw.innerSet(func() { + saw.x1 = x1 + }) +} + +func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { + var when time.Time + var whenInt int64 + var acc sampleAndWaterMarkAccumulator + var wellOrdered bool + func() { + saw.Lock() + defer saw.Unlock() + when = saw.clock.Now() + whenInt = saw.quantize(when) + acc = saw.sampleAndWaterMarkAccumulator + wellOrdered = !when.Before(acc.lastSet) + updateXOrX1() + saw.relX = saw.x / saw.x1 + if wellOrdered { + if acc.lastSetInt < whenInt { + saw.loRelX, saw.hiRelX = acc.relX, acc.relX + saw.lastSetInt = whenInt + } + saw.lastSet = when + } + // `wellOrdered` should always be true because we are using + // monotonic clock readings and they never go backwards. Yet + // very small backwards steps (under 1 microsecond) have been + // observed + // (https://github.com/kubernetes/kubernetes/issues/96459). + // In the backwards case, treat the current reading as if it + // had occurred at time `saw.lastSet` and log an error. It + // would be wrong to update `saw.lastSet` in this case because + // that plants a time bomb for future updates to + // `saw.lastSetInt`. + if saw.relX < saw.loRelX { + saw.loRelX = saw.relX + } else if saw.relX > saw.hiRelX { + saw.hiRelX = saw.relX + } + }() + if !wellOrdered { + lastSetS := acc.lastSet.String() + whenS := when.String() + klog.Errorf("Time went backwards from %s to %s for labelValues=%#+v", lastSetS, whenS, saw.labelValues) + } + for acc.lastSetInt < whenInt { + saw.samples.WithLabelValues(saw.labelValues...).Observe(acc.relX) + saw.waterMarks.WithLabelValues(saw.loLabelValues...).Observe(acc.loRelX) + saw.waterMarks.WithLabelValues(saw.hiLabelValues...).Observe(acc.hiRelX) + acc.lastSetInt++ + acc.loRelX, acc.hiRelX = acc.relX, acc.relX + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go new file mode 100644 index 000000000..25f41493c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +// TimedObserver gets informed about the values assigned to a variable +// `X float64` over time, and reports on the ratio `X/X1`. 
+type TimedObserver interface { + // Add notes a change to the variable + Add(deltaX float64) + + // Set notes a setting of the variable + Set(x float64) + + // SetX1 changes the value to use for X1 + SetX1(x1 float64) +} + +// TimedObserverGenerator creates related observers that are +// differentiated by a series of label values +type TimedObserverGenerator interface { + Generate(x, x1 float64, labelValues []string) TimedObserver +} + +// TimedObserverPair is a corresponding pair of observers, one for the +// number of requests waiting in queue(s) and one for the number of +// requests being executed +type TimedObserverPair struct { + // RequestsWaiting is given observations of the number of currently queued requests + RequestsWaiting TimedObserver + + // RequestsExecuting is given observations of the number of requests currently executing + RequestsExecuting TimedObserver +} + +// TimedObserverPairGenerator generates pairs +type TimedObserverPairGenerator interface { + Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go new file mode 100644 index 000000000..765e28790 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go @@ -0,0 +1,203 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "strings" + + flowcontrol "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" +) + +// Tests whether a given request and FlowSchema match. Nobody mutates +// either input. 
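[Illustrative sketch, not part of the vendored patch: one way the TimedObserver pair machinery defined above might be driven by a caller. The label values and the concurrency limit of 10 are invented for the example.]

// Illustrative only: drive a TimedObserverPair as a request moves from the
// queue to execution. The generator and label values are assumed inputs.
func observeRequestLifecycle(pairGen TimedObserverPairGenerator) {
	pair := pairGen.Generate(1, 1, []string{"example-priority", "example-flowschema"})

	pair.RequestsWaiting.Add(1)    // request arrives and is queued
	pair.RequestsWaiting.Add(-1)   // request leaves the queue...
	pair.RequestsExecuting.Add(1)  // ...and starts executing
	pair.RequestsExecuting.Add(-1) // request completes

	// SetX1 adjusts the denominator of the reported X/X1 ratio,
	// for example when a concurrency limit changes.
	pair.RequestsExecuting.SetX1(10)
}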
+func matchesFlowSchema(digest RequestDigest, flowSchema *flowcontrol.FlowSchema) bool { + for _, policyRule := range flowSchema.Spec.Rules { + if matchesPolicyRule(digest, &policyRule) { + return true + } + } + return false +} + +func matchesPolicyRule(digest RequestDigest, policyRule *flowcontrol.PolicyRulesWithSubjects) bool { + if !matchesASubject(digest.User, policyRule.Subjects) { + return false + } + if digest.RequestInfo.IsResourceRequest { + return matchesAResourceRule(digest.RequestInfo, policyRule.ResourceRules) + } + return matchesANonResourceRule(digest.RequestInfo, policyRule.NonResourceRules) +} + +func matchesASubject(user user.Info, subjects []flowcontrol.Subject) bool { + for _, subject := range subjects { + if matchesSubject(user, subject) { + return true + } + } + return false +} + +func matchesSubject(user user.Info, subject flowcontrol.Subject) bool { + switch subject.Kind { + case flowcontrol.SubjectKindUser: + return subject.User != nil && (subject.User.Name == flowcontrol.NameAll || subject.User.Name == user.GetName()) + case flowcontrol.SubjectKindGroup: + if subject.Group == nil { + return false + } + seek := subject.Group.Name + if seek == "*" { + return true + } + for _, userGroup := range user.GetGroups() { + if userGroup == seek { + return true + } + } + return false + case flowcontrol.SubjectKindServiceAccount: + if subject.ServiceAccount == nil { + return false + } + if subject.ServiceAccount.Name == flowcontrol.NameAll { + return serviceAccountMatchesNamespace(subject.ServiceAccount.Namespace, user.GetName()) + } + return serviceaccount.MatchesUsername(subject.ServiceAccount.Namespace, subject.ServiceAccount.Name, user.GetName()) + default: + return false + } +} + +// serviceAccountMatchesNamespace checks whether the provided service account username matches the namespace, without +// allocating. Use this when checking a service account namespace against a known string. +// This is copied from `k8s.io/apiserver/pkg/authentication/serviceaccount::MatchesUsername` and simplified to not check the name part. 
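[Illustrative sketch, not part of the vendored patch: a minimal example of the subject-matching semantics above, using the flowcontrol v1beta1 types this file already imports. The group name is only an example.]

// Illustrative only: matchesSubject (above) accepts this subject for any
// request from a user carrying the "system:authenticated" group.
func exampleGroupSubject() flowcontrol.Subject {
	return flowcontrol.Subject{
		Kind:  flowcontrol.SubjectKindGroup,
		Group: &flowcontrol.GroupSubject{Name: "system:authenticated"},
	}
}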
+func serviceAccountMatchesNamespace(namespace string, username string) bool { + const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ) + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + return strings.HasPrefix(username, ServiceAccountUsernameSeparator) +} + +func matchesAResourceRule(ri *request.RequestInfo, rules []flowcontrol.ResourcePolicyRule) bool { + for _, rr := range rules { + if matchesResourcePolicyRule(ri, rr) { + return true + } + } + return false +} + +func matchesResourcePolicyRule(ri *request.RequestInfo, policyRule flowcontrol.ResourcePolicyRule) bool { + if !matchPolicyRuleVerb(policyRule.Verbs, ri.Verb) { + return false + } + if !matchPolicyRuleResource(policyRule.Resources, ri.Resource, ri.Subresource) { + return false + } + if !matchPolicyRuleAPIGroup(policyRule.APIGroups, ri.APIGroup) { + return false + } + if len(ri.Namespace) == 0 { + return policyRule.ClusterScope + } + return containsString(ri.Namespace, policyRule.Namespaces, flowcontrol.NamespaceEvery) +} + +func matchesANonResourceRule(ri *request.RequestInfo, rules []flowcontrol.NonResourcePolicyRule) bool { + for _, rr := range rules { + if matchesNonResourcePolicyRule(ri, rr) { + return true + } + } + return false +} + +func matchesNonResourcePolicyRule(ri *request.RequestInfo, policyRule flowcontrol.NonResourcePolicyRule) bool { + if !matchPolicyRuleVerb(policyRule.Verbs, ri.Verb) { + return false + } + return matchPolicyRuleNonResourceURL(policyRule.NonResourceURLs, ri.Path) +} + +func matchPolicyRuleVerb(policyRuleVerbs []string, requestVerb string) bool { + return containsString(requestVerb, policyRuleVerbs, flowcontrol.VerbAll) +} + +func matchPolicyRuleNonResourceURL(policyRuleRequestURLs []string, requestPath string) bool { + for _, rulePath := range policyRuleRequestURLs { + if rulePath == flowcontrol.NonResourceAll || rulePath == requestPath { + return true + } + rulePrefix := strings.TrimSuffix(rulePath, "*") + if !strings.HasSuffix(rulePrefix, "/") { + rulePrefix = rulePrefix + "/" + } + if strings.HasPrefix(requestPath, rulePrefix) { + return true + } + } + return false +} + +func matchPolicyRuleAPIGroup(policyRuleAPIGroups []string, requestAPIGroup string) bool { + return containsString(requestAPIGroup, policyRuleAPIGroups, flowcontrol.APIGroupAll) +} + +func rsJoin(requestResource, requestSubresource string) string { + seekString := requestResource + if requestSubresource != "" { + seekString = requestResource + "/" + requestSubresource + } + return seekString +} + +func matchPolicyRuleResource(policyRuleRequestResources []string, requestResource, requestSubresource string) bool { + return containsString(rsJoin(requestResource, requestSubresource), policyRuleRequestResources, flowcontrol.ResourceAll) +} + +// containsString returns true if either `x` or `wildcard` is in +// `list`. The wildcard is not a pattern to match against `x`; rather +// the presence of the wildcard in the list is the caller's way of +// saying that all values of `x` should match the list. This function +// assumes that if `wildcard` is in `list` then it is the only member +// of the list, which is enforced by validation. 
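[Illustrative sketch, not part of the vendored patch: how serviceAccountMatchesNamespace above treats a few invented usernames; the expected results are noted in comments.]

// Illustrative only: prefix and separator checks done by
// serviceAccountMatchesNamespace above, with invented inputs.
func exampleServiceAccountNamespaceMatch() {
	_ = serviceAccountMatchesNamespace("kube-system", "system:serviceaccount:kube-system:default") // true
	_ = serviceAccountMatchesNamespace("kube-system", "system:serviceaccount:default:deployer")    // false: wrong namespace
	_ = serviceAccountMatchesNamespace("kube", "system:serviceaccount:kube-system:default")        // false: "kube" is not followed by ":"
}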
+func containsString(x string, list []string, wildcard string) bool { + if len(list) == 1 && list[0] == wildcard { + return true + } + for _, y := range list { + if x == y { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go b/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go new file mode 100644 index 000000000..f81e09a29 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flushwriter implements a wrapper for a writer that flushes on every +// write if that writer implements the io.Flusher interface +package flushwriter // import "k8s.io/apiserver/pkg/util/flushwriter" diff --git a/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go b/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go new file mode 100644 index 000000000..748bd0108 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flushwriter + +import ( + "io" + "net/http" +) + +// Wrap wraps an io.Writer into a writer that flushes after every write if +// the writer implements the Flusher interface. +func Wrap(w io.Writer) io.Writer { + fw := &flushWriter{ + writer: w, + } + if flusher, ok := w.(http.Flusher); ok { + fw.flusher = flusher + } + return fw +} + +// flushWriter provides wrapper for responseWriter with HTTP streaming capabilities +type flushWriter struct { + flusher http.Flusher + writer io.Writer +} + +// Write is a FlushWriter implementation of the io.Writer that sends any buffered +// data to the client. +func (fw *flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.writer.Write(p) + if err != nil { + return + } + if fw.flusher != nil { + fw.flusher.Flush() + } + return +} diff --git a/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go new file mode 100644 index 000000000..0f95a5f79 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + + "github.com/go-openapi/spec" + "github.com/googleapis/gnostic/compiler" + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + yaml "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +// ToProtoModels builds the proto formatted models from OpenAPI spec +func ToProtoModels(openAPISpec *spec.Swagger) (proto.Models, error) { + specBytes, err := json.MarshalIndent(openAPISpec, " ", " ") + if err != nil { + return nil, err + } + + var info yaml.MapSlice + err = yaml.Unmarshal(specBytes, &info) + if err != nil { + return nil, err + } + + doc, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil)) + if err != nil { + return nil, err + } + + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + return models, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go b/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go new file mode 100644 index 000000000..6ef4ed890 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/shufflesharding/shufflesharding.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shufflesharding + +import ( + "fmt" + "math" +) + +// MaxHashBits is the max bit length which can be used from hash value. +// If we use all bits of hash value, the critical(last) card shuffled by +// Dealer will be uneven to 2:3 (first half:second half) at most, +// in order to reduce this unevenness to 32:33, we set MaxHashBits to 60 here. +const MaxHashBits = 60 + +// RequiredEntropyBits makes a quick and slightly conservative estimate of the number +// of bits of hash value that are consumed in shuffle sharding a deck of the given size +// to a hand of the given size. The result is meaningful only if +// 1 <= handSize <= deckSize <= 1<<26. +func RequiredEntropyBits(deckSize, handSize int) int { + return int(math.Ceil(math.Log2(float64(deckSize)) * float64(handSize))) +} + +// Dealer contains some necessary parameters and provides some methods for shuffle sharding. +// Dealer is thread-safe. +type Dealer struct { + deckSize int + handSize int +} + +// NewDealer will create a Dealer with the given deckSize and handSize, will return error when +// deckSize or handSize is invalid as below. +// 1. deckSize or handSize is not positive +// 2. handSize is greater than deckSize +// 3. deckSize is impractically large (greater than 1<<26) +// 4. 
required entropy bits of deckSize and handSize is greater than MaxHashBits +func NewDealer(deckSize, handSize int) (*Dealer, error) { + if deckSize <= 0 || handSize <= 0 { + return nil, fmt.Errorf("deckSize %d or handSize %d is not positive", deckSize, handSize) + } + if handSize > deckSize { + return nil, fmt.Errorf("handSize %d is greater than deckSize %d", handSize, deckSize) + } + if deckSize > 1<<26 { + return nil, fmt.Errorf("deckSize %d is impractically large", deckSize) + } + if RequiredEntropyBits(deckSize, handSize) > MaxHashBits { + return nil, fmt.Errorf("required entropy bits of deckSize %d and handSize %d is greater than %d", deckSize, handSize, MaxHashBits) + } + + return &Dealer{ + deckSize: deckSize, + handSize: handSize, + }, nil +} + +// Deal shuffles a card deck and deals a hand of cards, using the given hashValue as the source of entropy. +// The deck size and hand size are properties of the Dealer. +// This function synchronously makes sequential calls to pick, one for each dealt card. +// Each card is identified by an integer in the range [0, deckSize). +// For example, for deckSize=128 and handSize=4 this function might call pick(14); pick(73); pick(119); pick(26). +func (d *Dealer) Deal(hashValue uint64, pick func(int)) { + // 15 is the largest possible value of handSize + var remainders [15]int + + for i := 0; i < d.handSize; i++ { + hashValueNext := hashValue / uint64(d.deckSize-i) + remainders[i] = int(hashValue - uint64(d.deckSize-i)*hashValueNext) + hashValue = hashValueNext + } + + for i := 0; i < d.handSize; i++ { + card := remainders[i] + for j := i; j > 0; j-- { + if card >= remainders[j-1] { + card++ + } + } + pick(card) + } +} + +// DealIntoHand shuffles and deals according to the Dealer's parameters, +// using the given hashValue as the source of entropy and then +// returns the dealt cards as a slice of `int`. +// If `hand` has the correct length as Dealer's handSize, it will be used as-is and no allocations will be made. +// If `hand` is nil or too small, it will be extended (performing an allocation). +// If `hand` is too large, a sub-slice will be returned. +func (d *Dealer) DealIntoHand(hashValue uint64, hand []int) []int { + h := hand[:0] + d.Deal(hashValue, func(card int) { h = append(h, card) }) + return h +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index 042879dad..965bc8b58 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -55,7 +55,7 @@ func NewDefaultAuthenticationInfoResolverWrapper( } if egressSelector != nil { - networkContext := egressselector.Master.AsNetworkContext() + networkContext := egressselector.ControlPlane.AsNetworkContext() var egressDialer utilnet.DialFunc egressDialer, err = egressSelector.Lookup(networkContext) diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index 2128647e0..abaade352 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -36,12 +36,23 @@ import ( // timeout of the HTTP request, including reading the response body. const defaultRequestTimeout = 30 * time.Second +// DefaultRetryBackoffWithInitialDelay returns the default backoff parameters for webhook retry from a given initial delay. +// Handy for the client that provides a custom initial delay only. 
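[Illustrative sketch, not part of the vendored patch, referring back to the shufflesharding Dealer added above. All concrete values and the assumed import of the shufflesharding package are examples only.]

// Illustrative only: shuffle-shard a flow onto 6 of 256 queues.
func exampleDeal() []int {
	d, err := shufflesharding.NewDealer(256, 6)
	if err != nil {
		panic(err) // cannot happen for these sizes: 6*log2(256) = 48 bits <= MaxHashBits
	}
	// In real use the hash value comes from hashing a flow identifier;
	// this constant is an arbitrary stand-in.
	return d.DealIntoHand(0x8765432109abcdef, make([]int, 6))
}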
+func DefaultRetryBackoffWithInitialDelay(initialBackoffDelay time.Duration) wait.Backoff { + return wait.Backoff{ + Duration: initialBackoffDelay, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + } +} + // GenericWebhook defines a generic client for webhooks with commonly used capabilities, // such as retry requests. type GenericWebhook struct { - RestClient *rest.RESTClient - InitialBackoff time.Duration - ShouldRetry func(error) bool + RestClient *rest.RESTClient + RetryBackoff wait.Backoff + ShouldRetry func(error) bool } // DefaultShouldRetry is a default implementation for the GenericWebhook ShouldRetry function property. @@ -61,11 +72,11 @@ func DefaultShouldRetry(err error) bool { } // NewGenericWebhook creates a new GenericWebhook from the provided kubeconfig file. -func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, initialBackoff time.Duration, customDial utilnet.DialFunc) (*GenericWebhook, error) { - return newGenericWebhook(scheme, codecFactory, kubeConfigFile, groupVersions, initialBackoff, defaultRequestTimeout, customDial) +func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*GenericWebhook, error) { + return newGenericWebhook(scheme, codecFactory, kubeConfigFile, groupVersions, retryBackoff, defaultRequestTimeout, customDial) } -func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, initialBackoff, requestTimeout time.Duration, customDial utilnet.DialFunc) (*GenericWebhook, error) { +func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, kubeConfigFile string, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff, requestTimeout time.Duration, customDial utilnet.DialFunc) (*GenericWebhook, error) { for _, groupVersion := range groupVersions { if !scheme.IsVersionRegistered(groupVersion) { return nil, fmt.Errorf("webhook plugin requires enabling extension resource: %s", groupVersion) @@ -102,19 +113,20 @@ func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFact return nil, err } - return &GenericWebhook{restClient, initialBackoff, DefaultShouldRetry}, nil + return &GenericWebhook{restClient, retryBackoff, DefaultShouldRetry}, nil } -// WithExponentialBackoff will retry webhookFn() up to 5 times with exponentially increasing backoff when -// it returns an error for which this GenericWebhook's ShouldRetry function returns true, confirming it to -// be retriable. If no ShouldRetry has been defined for the webhook, then the default one is used (DefaultShouldRetry). +// WithExponentialBackoff will retry webhookFn() as specified by the given backoff parameters with exponentially +// increasing backoff when it returns an error for which this GenericWebhook's ShouldRetry function returns true, +// confirming it to be retriable. If no ShouldRetry has been defined for the webhook, +// then the default one is used (DefaultShouldRetry). 
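[Illustrative sketch, not part of the vendored patch: what the retry-backoff parameters introduced above work out to for a 500ms initial delay; the arithmetic follows from Factor 1.5 and Steps 5.]

// Illustrative only: the retry schedule produced by
// DefaultRetryBackoffWithInitialDelay above for a 500ms initial delay.
// Jitter of 0.2 stretches each step by up to +20%, so actual delays vary.
func exampleRetryBackoff() wait.Backoff {
	b := DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond)
	// Nominal (jitter-free) delays over the 5 steps:
	// 500ms, 750ms, 1.125s, ~1.69s, ~2.53s.
	return b
}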
func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn func() rest.Result) rest.Result { var result rest.Result shouldRetry := g.ShouldRetry if shouldRetry == nil { shouldRetry = DefaultShouldRetry } - WithExponentialBackoff(ctx, g.InitialBackoff, func() error { + WithExponentialBackoff(ctx, g.RetryBackoff, func() error { result = webhookFn() return result.Error() }, shouldRetry) @@ -123,28 +135,28 @@ func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn f // WithExponentialBackoff will retry webhookFn up to 5 times with exponentially increasing backoff when // it returns an error for which shouldRetry returns true, confirming it to be retriable. -func WithExponentialBackoff(ctx context.Context, initialBackoff time.Duration, webhookFn func() error, shouldRetry func(error) bool) error { - backoff := wait.Backoff{ - Duration: initialBackoff, - Factor: 1.5, - Jitter: 0.2, - Steps: 5, - } - - var err error - wait.ExponentialBackoff(backoff, func() (bool, error) { - err = webhookFn() - if ctx.Err() != nil { - // we timed out or were cancelled, we should not retry - return true, err - } - if shouldRetry(err) { +func WithExponentialBackoff(ctx context.Context, retryBackoff wait.Backoff, webhookFn func() error, shouldRetry func(error) bool) error { + // having a webhook error allows us to track the last actual webhook error for requests that + // are later cancelled or time out. + var webhookErr error + err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func() (bool, error) { + webhookErr = webhookFn() + if shouldRetry(webhookErr) { return false, nil } - if err != nil { - return false, err + if webhookErr != nil { + return false, webhookErr } return true, nil }) - return err + + switch { + // we check for webhookErr first, if webhookErr is set it's the most important error to return. + case webhookErr != nil: + return webhookErr + case err != nil: + return fmt.Errorf("webhook call failed: %s", err.Error()) + default: + return nil + } } diff --git a/vendor/k8s.io/apiserver/pkg/warning/context.go b/vendor/k8s.io/apiserver/pkg/warning/context.go new file mode 100644 index 000000000..1b9dd54df --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/warning/context.go @@ -0,0 +1,59 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package warning + +import ( + "context" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // auditAnnotationsKey is the context key for the audit annotations. + warningRecorderKey key = iota +) + +// Recorder provides a method for recording warnings +type Recorder interface { + // AddWarning adds the specified warning to the response. + // agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. + // text must be valid UTF-8, and must not contain control characters. 
+ AddWarning(agent, text string) +} + +// WithWarningRecorder returns a new context that wraps the provided context and contains the provided Recorder implementation. +// The returned context can be passed to AddWarning(). +func WithWarningRecorder(ctx context.Context, recorder Recorder) context.Context { + return context.WithValue(ctx, warningRecorderKey, recorder) +} +func warningRecorderFrom(ctx context.Context) (Recorder, bool) { + recorder, ok := ctx.Value(warningRecorderKey).(Recorder) + return recorder, ok +} + +// AddWarning records a warning for the specified agent and text to the Recorder added to the provided context using WithWarningRecorder(). +// If no Recorder exists in the provided context, this is a no-op. +// agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. +// text must be valid UTF-8, and must not contain control characters. +func AddWarning(ctx context.Context, agent string, text string) { + recorder, ok := warningRecorderFrom(ctx) + if !ok { + return + } + recorder.AddWarning(agent, text) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go new file mode 100644 index 000000000..a96d9bea3 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go @@ -0,0 +1,290 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffered + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/client-go/util/flowcontrol" +) + +// PluginName is the name reported in error metrics. +const PluginName = "buffered" + +// BatchConfig represents batching delegate audit backend configuration. +type BatchConfig struct { + // BufferSize defines a size of the buffering queue. + BufferSize int + // MaxBatchSize defines maximum size of a batch. + MaxBatchSize int + // MaxBatchWait indicates the maximum interval between two batches. + MaxBatchWait time.Duration + + // ThrottleEnable defines whether throttling will be applied to the batching process. + ThrottleEnable bool + // ThrottleQPS defines the allowed rate of batches per second sent to the delegate backend. + ThrottleQPS float32 + // ThrottleBurst defines the maximum number of requests sent to the delegate backend at the same moment in case + // the capacity defined by ThrottleQPS was not utilized. + ThrottleBurst int + + // Whether the delegate backend should be called asynchronously. + AsyncDelegate bool +} + +type bufferedBackend struct { + // The delegate backend that actually exports events. + delegateBackend audit.Backend + + // Channel to buffer events before sending to the delegate backend. + buffer chan *auditinternal.Event + // Maximum number of events in a batch sent to the delegate backend. 
+ maxBatchSize int + // Amount of time to wait after sending a batch to the delegate backend before sending another one. + // + // Receiving maxBatchSize events will always trigger sending a batch, regardless of the amount of time passed. + maxBatchWait time.Duration + + // Whether the delegate backend should be called asynchronously. + asyncDelegate bool + + // Channel to signal that the batching routine has processed all remaining events and exited. + // Once `shutdownCh` is closed no new events will be sent to the delegate backend. + shutdownCh chan struct{} + + // WaitGroup to control the concurrency of sending batches to the delegate backend. + // Worker routine calls Add before sending a batch and + // then spawns a routine that calls Done after batch was processed by the delegate backend. + // This WaitGroup is used to wait for all sending routines to finish before shutting down audit backend. + wg sync.WaitGroup + + // Limits the number of batches sent to the delegate backend per second. + throttle flowcontrol.RateLimiter +} + +var _ audit.Backend = &bufferedBackend{} + +// NewBackend returns a buffered audit backend that wraps delegate backend. +// Buffered backend automatically runs and shuts down the delegate backend. +func NewBackend(delegate audit.Backend, config BatchConfig) audit.Backend { + var throttle flowcontrol.RateLimiter + if config.ThrottleEnable { + throttle = flowcontrol.NewTokenBucketRateLimiter(config.ThrottleQPS, config.ThrottleBurst) + } + return &bufferedBackend{ + delegateBackend: delegate, + buffer: make(chan *auditinternal.Event, config.BufferSize), + maxBatchSize: config.MaxBatchSize, + maxBatchWait: config.MaxBatchWait, + asyncDelegate: config.AsyncDelegate, + shutdownCh: make(chan struct{}), + wg: sync.WaitGroup{}, + throttle: throttle, + } +} + +func (b *bufferedBackend) Run(stopCh <-chan struct{}) error { + go func() { + // Signal that the working routine has exited. + defer close(b.shutdownCh) + + b.processIncomingEvents(stopCh) + + // Handle the events that were received after the last buffer + // scraping and before this line. Since the buffer is closed, no new + // events will come through. + allEventsProcessed := false + timer := make(chan time.Time) + for !allEventsProcessed { + allEventsProcessed = func() bool { + // Recover from any panic in order to try to process all remaining events. + // Note, that in case of a panic, the return value will be false and + // the loop execution will continue. + defer runtime.HandleCrash() + + events := b.collectEvents(timer, wait.NeverStop) + b.processEvents(events) + return len(events) == 0 + }() + } + }() + return b.delegateBackend.Run(stopCh) +} + +// Shutdown blocks until stopCh passed to the Run method is closed and all +// events added prior to that moment are batched and sent to the delegate backend. +func (b *bufferedBackend) Shutdown() { + // Wait until the routine spawned in Run method exits. + <-b.shutdownCh + + // Wait until all sending routines exit. + // + // - When b.shutdownCh is closed, we know that the goroutine in Run has terminated. + // - This means that processIncomingEvents has terminated. + // - Which means that b.buffer is closed and cannot accept any new events anymore. + // - Because processEvents is called synchronously from the Run goroutine, the waitgroup has its final value. + // Hence wg.Wait will not miss any more outgoing batches. + b.wg.Wait() + + b.delegateBackend.Shutdown() +} + +// processIncomingEvents runs a loop that collects events from the buffer. 
When +// b.stopCh is closed, processIncomingEvents stops and closes the buffer. +func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) { + defer close(b.buffer) + + var ( + maxWaitChan <-chan time.Time + maxWaitTimer *time.Timer + ) + // Only use max wait batching if batching is enabled. + if b.maxBatchSize > 1 { + maxWaitTimer = time.NewTimer(b.maxBatchWait) + maxWaitChan = maxWaitTimer.C + defer maxWaitTimer.Stop() + } + + for { + func() { + // Recover from any panics caused by this function so a panic in the + // goroutine can't bring down the main routine. + defer runtime.HandleCrash() + + if b.maxBatchSize > 1 { + maxWaitTimer.Reset(b.maxBatchWait) + } + b.processEvents(b.collectEvents(maxWaitChan, stopCh)) + }() + + select { + case <-stopCh: + return + default: + } + } +} + +// collectEvents attempts to collect some number of events in a batch. +// +// The following things can cause collectEvents to stop and return the list +// of events: +// +// * Maximum number of events for a batch. +// * Timer has passed. +// * Buffer channel is closed and empty. +// * stopCh is closed. +func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event { + var events []*auditinternal.Event + +L: + for i := 0; i < b.maxBatchSize; i++ { + select { + case ev, ok := <-b.buffer: + // Buffer channel was closed and no new events will follow. + if !ok { + break L + } + events = append(events, ev) + case <-timer: + // Timer has expired. Send currently accumulated batch. + break L + case <-stopCh: + // Backend has been stopped. Send currently accumulated batch. + break L + } + } + + return events +} + +// processEvents process the batch events in a goroutine using delegateBackend's ProcessEvents. +func (b *bufferedBackend) processEvents(events []*auditinternal.Event) { + if len(events) == 0 { + return + } + + // TODO(audit): Should control the number of active goroutines + // if one goroutine takes 5 seconds to finish, the number of goroutines can be 5 * defaultBatchThrottleQPS + if b.throttle != nil { + b.throttle.Accept() + } + + if b.asyncDelegate { + b.wg.Add(1) + go func() { + defer b.wg.Done() + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } else { + func() { + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } +} + +func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + // The following mechanism is in place to support the situation when audit + // events are still coming after the backend was stopped. + var sendErr error + var evIndex int + + // If the delegateBackend was shutdown and the buffer channel was closed, an + // attempt to add an event to it will result in panic that we should + // recover from. + defer func() { + if err := recover(); err != nil { + sendErr = fmt.Errorf("audit backend shut down") + } + if sendErr != nil { + audit.HandlePluginError(PluginName, sendErr, ev[evIndex:]...) + } + }() + + for i, e := range ev { + evIndex = i + // Per the audit.Backend interface these events are reused after being + // sent to the Sink. Deep copy and send the copy to the queue. 
+ event := e.DeepCopy() + + select { + case b.buffer <- event: + default: + sendErr = fmt.Errorf("audit buffer queue blocked") + return true + } + } + return true +} + +func (b *bufferedBackend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go new file mode 100644 index 000000000..a82599e42 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package buffered provides an implementation for the audit.Backend interface +// that batches incoming audit events and sends batches to the delegate audit.Backend. +package buffered // import "k8s.io/apiserver/plugin/pkg/audit/buffered" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go new file mode 100644 index 000000000..2ef2cc6ec --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "fmt" + "io" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +const ( + // FormatLegacy saves event in 1-line text format. + FormatLegacy = "legacy" + // FormatJson saves event in structured json format. + FormatJson = "json" + + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "log" +) + +// AllowedFormats are the formats known by log backend. 
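[Illustrative sketch, not part of the vendored patch: constructing the log backend defined just below in JSON mode. The os.Stdout writer, the audit v1 group version, and the extra imports (os, auditv1 "k8s.io/apiserver/pkg/apis/audit/v1") are assumptions for the example.]

// Illustrative only: a JSON-format audit log backend writing to stdout.
func exampleLogBackend() audit.Backend {
	return NewBackend(os.Stdout, FormatJson, auditv1.SchemeGroupVersion)
}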
+var AllowedFormats = []string{ + FormatLegacy, + FormatJson, +} + +type backend struct { + out io.Writer + format string + encoder runtime.Encoder +} + +var _ audit.Backend = &backend{} + +func NewBackend(out io.Writer, format string, groupVersion schema.GroupVersion) audit.Backend { + return &backend{ + out: out, + format: format, + encoder: audit.Codecs.LegacyCodec(groupVersion), + } +} + +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + success := true + for _, ev := range events { + success = b.logEvent(ev) && success + } + return success +} + +func (b *backend) logEvent(ev *auditinternal.Event) bool { + line := "" + switch b.format { + case FormatLegacy: + line = audit.EventString(ev) + "\n" + case FormatJson: + bs, err := runtime.Encode(b.encoder, ev) + if err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + line = string(bs[:]) + default: + audit.HandlePluginError(PluginName, fmt.Errorf("log format %q is not in list of known formats (%s)", + b.format, strings.Join(AllowedFormats, ",")), ev) + return false + } + if _, err := fmt.Fprint(b.out, line); err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + return true +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // Nothing to do here. +} + +func (b *backend) String() string { + return PluginName +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go new file mode 100644 index 000000000..9392ac314 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package truncate provides an implementation for the audit.Backend interface +// that truncates audit events and sends them to the delegate audit.Backend. +package truncate // import "k8s.io/apiserver/plugin/pkg/audit/truncate" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go new file mode 100644 index 000000000..de1c2d9f7 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go @@ -0,0 +1,160 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package truncate + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +const ( + // PluginName is the name reported in error metrics. + PluginName = "truncate" + + // annotationKey defines the name of the annotation used to indicate truncation. + annotationKey = "audit.k8s.io/truncated" + // annotationValue defines the value of the annotation used to indicate truncation. + annotationValue = "true" +) + +// Config represents truncating backend configuration. +type Config struct { + // MaxEventSize defines max allowed size of the event. If the event is larger, + // truncating will be performed. + MaxEventSize int64 + + // MaxBatchSize defined max allowed size of the batch of events, passed to the backend. + // If the total size of the batch is larger than this number, batch will be split. Actual + // size of the serialized request might be slightly higher, on the order of hundreds of bytes. + MaxBatchSize int64 +} + +type backend struct { + // The delegate backend that actually exports events. + delegateBackend audit.Backend + + // Configuration used for truncation. + c Config + + // Encoder used to calculate audit event sizes. + e runtime.Encoder +} + +var _ audit.Backend = &backend{} + +// NewBackend returns a new truncating backend, using configuration passed in the parameters. +// Truncate backend automatically runs and shut downs the delegate backend. +func NewBackend(delegateBackend audit.Backend, config Config, groupVersion schema.GroupVersion) audit.Backend { + return &backend{ + delegateBackend: delegateBackend, + c: config, + e: audit.Codecs.LegacyCodec(groupVersion), + } +} + +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + var errors []error + var impacted []*auditinternal.Event + var batch []*auditinternal.Event + var batchSize int64 + success := true + for _, event := range events { + size, err := b.calcSize(event) + // If event was correctly serialized, but the size is more than allowed + // and it makes sense to do trimming, i.e. there's a request and/or + // response present, try to strip away request and response. + if err == nil && size > b.c.MaxEventSize && event.Level.GreaterOrEqual(auditinternal.LevelRequest) { + event = truncate(event) + size, err = b.calcSize(event) + } + if err != nil { + errors = append(errors, err) + impacted = append(impacted, event) + continue + } + if size > b.c.MaxEventSize { + errors = append(errors, fmt.Errorf("event is too large even after truncating")) + impacted = append(impacted, event) + continue + } + + if len(batch) > 0 && batchSize+size > b.c.MaxBatchSize { + success = b.delegateBackend.ProcessEvents(batch...) && success + batch = []*auditinternal.Event{} + batchSize = 0 + } + + batchSize += size + batch = append(batch, event) + } + + if len(batch) > 0 { + success = b.delegateBackend.ProcessEvents(batch...) && success + } + + if len(impacted) > 0 { + audit.HandlePluginError(PluginName, utilerrors.NewAggregate(errors), impacted...) + } + return success +} + +// truncate removed request and response objects from the audit events, +// to try and keep at least metadata. +func truncate(e *auditinternal.Event) *auditinternal.Event { + // Make a shallow copy to avoid copying response/request objects. 
+ newEvent := &auditinternal.Event{} + *newEvent = *e + + newEvent.RequestObject = nil + newEvent.ResponseObject = nil + audit.LogAnnotation(newEvent, annotationKey, annotationValue) + return newEvent +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return b.delegateBackend.Run(stopCh) +} + +func (b *backend) Shutdown() { + b.delegateBackend.Shutdown() +} + +func (b *backend) calcSize(e *auditinternal.Event) (int64, error) { + s := &sizer{} + if err := b.e.Encode(e, s); err != nil { + return 0, err + } + return s.Size, nil +} + +func (b *backend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} + +type sizer struct { + Size int64 +} + +func (s *sizer) Write(p []byte) (n int, err error) { + s.Size += int64(len(p)) + return len(p), nil +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go new file mode 100644 index 000000000..0a2aa7078 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go @@ -0,0 +1,139 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the audit.Backend interface using HTTP webhooks. +package webhook + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/install" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/rest" + utiltrace "k8s.io/utils/trace" +) + +const ( + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "webhook" + + // DefaultInitialBackoffDelay is the default amount of time to wait before + // retrying sending audit events through a webhook. + DefaultInitialBackoffDelay = 10 * time.Second +) + +func init() { + install.Install(audit.Scheme) +} + +// retryOnError enforces the webhook client to retry requests +// on error regardless of its nature. +// The default implementation considers a very limited set of +// 'retriable' errors, assuming correct use of HTTP codes by +// external webhooks. +// That may easily lead to dropped audit events. In fact, there is +// hardly any error that could be a justified reason NOT to retry +// sending audit events if there is even a slight chance that the +// receiving service gets back to normal at some point. 
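[Illustrative sketch, not part of the vendored patch, referring back to the truncating backend added above. The size limits, the group version, and the assumed imports (the truncate package, auditv1 "k8s.io/apiserver/pkg/apis/audit/v1") are invented for the example.]

// Illustrative only: wrap a delegate with the truncating backend.
func exampleTruncateBackend(delegate audit.Backend) audit.Backend {
	return truncate.NewBackend(delegate, truncate.Config{
		MaxEventSize: 100 * 1024,       // strip request/response bodies from events larger than ~100KiB
		MaxBatchSize: 10 * 1024 * 1024, // split batches that would exceed ~10MiB
	}, auditv1.SchemeGroupVersion)
}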
+func retryOnError(err error) bool { + if err != nil { + return true + } + return false +} + +func loadWebhook(configFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*webhook.GenericWebhook, error) { + w, err := webhook.NewGenericWebhook(audit.Scheme, audit.Codecs, configFile, + []schema.GroupVersion{groupVersion}, retryBackoff, customDial) + if err != nil { + return nil, err + } + + w.ShouldRetry = retryOnError + return w, nil +} + +type backend struct { + w *webhook.GenericWebhook + name string +} + +// NewDynamicBackend returns an audit backend configured from a REST client that +// sends events over HTTP to an external service. +func NewDynamicBackend(rc *rest.RESTClient, retryBackoff wait.Backoff) audit.Backend { + return &backend{ + w: &webhook.GenericWebhook{ + RestClient: rc, + RetryBackoff: retryBackoff, + ShouldRetry: retryOnError, + }, + name: fmt.Sprintf("dynamic_%s", PluginName), + } +} + +// NewBackend returns an audit backend that sends events over HTTP to an external service. +func NewBackend(kubeConfigFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (audit.Backend, error) { + w, err := loadWebhook(kubeConfigFile, groupVersion, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &backend{w: w, name: PluginName}, nil +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // nothing to do here +} + +func (b *backend) ProcessEvents(ev ...*auditinternal.Event) bool { + if err := b.processEvents(ev...); err != nil { + audit.HandlePluginError(b.String(), err, ev...) + return false + } + return true +} + +func (b *backend) processEvents(ev ...*auditinternal.Event) error { + var list auditinternal.EventList + for _, e := range ev { + list.Items = append(list.Items, *e) + } + return b.w.WithExponentialBackoff(context.Background(), func() rest.Result { + trace := utiltrace.New("Call Audit Events webhook", + utiltrace.Field{"name", b.name}, + utiltrace.Field{"event-count", len(list.Items)}) + // Only log audit webhook traces that exceed a 25ms per object limit plus a 50ms + // request overhead allowance. The high per object limit used here is primarily to + // allow enough time for the serialization/deserialization of audit events, which + // contain nested request and response objects plus additional event fields. + defer trace.LogIfLong(time.Duration(50+25*len(list.Items)) * time.Millisecond) + return b.w.RestClient.Post().Body(&list).Do(context.TODO()) + }).Error() +} + +func (b *backend) String() string { + return b.name +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index b04c6a215..d4bf1b45a 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/webhook" @@ -37,7 +38,11 @@ import ( "k8s.io/klog/v2" ) -const retryBackoff = 500 * time.Millisecond +// DefaultRetryBackoff returns the default backoff parameters for webhook retry. 
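[Illustrative sketch, not part of the vendored patch: the slow-call threshold used by the audit webhook's processEvents above, written out as a hypothetical helper to make the arithmetic concrete.]

// Illustrative only: a batch of N events is logged as a slow trace only if the
// webhook call exceeds 50ms plus 25ms per event, e.g. 300ms for 10 events.
func exampleTraceThreshold(eventCount int) time.Duration {
	return time.Duration(50+25*eventCount) * time.Millisecond
}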
+func DefaultRetryBackoff() *wait.Backoff { + backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond) + return &backoff +} // Ensure WebhookTokenAuthenticator implements the authenticator.Token interface. var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) @@ -47,16 +52,16 @@ type tokenReviewer interface { } type WebhookTokenAuthenticator struct { - tokenReview tokenReviewer - initialBackoff time.Duration - implicitAuds authenticator.Audiences + tokenReview tokenReviewer + retryBackoff wait.Backoff + implicitAuds authenticator.Audiences } // NewFromInterface creates a webhook authenticator using the given tokenReview // client. It is recommend to wrap this authenticator with the token cache // authenticator implemented in // k8s.io/apiserver/pkg/authentication/token/cache. -func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { +func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff) (*WebhookTokenAuthenticator, error) { return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } @@ -64,8 +69,8 @@ func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, i // file. It is recommend to wrap this authenticator with the token cache // authenticator implemented in // k8s.io/apiserver/pkg/authentication/token/cache. -func New(kubeConfigFile string, version string, implicitAuds authenticator.Audiences, customDial utilnet.DialFunc) (*WebhookTokenAuthenticator, error) { - tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile, version, customDial) +func New(kubeConfigFile string, version string, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*WebhookTokenAuthenticator, error) { + tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile, version, retryBackoff, customDial) if err != nil { return nil, err } @@ -73,8 +78,8 @@ func New(kubeConfigFile string, version string, implicitAuds authenticator.Audie } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(tokenReview tokenReviewer, initialBackoff time.Duration, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { - return &WebhookTokenAuthenticator{tokenReview, initialBackoff, implicitAuds}, nil +func newWithBackoff(tokenReview tokenReviewer, retryBackoff wait.Backoff, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return &WebhookTokenAuthenticator{tokenReview, retryBackoff, implicitAuds}, nil } // AuthenticateToken implements the authenticator.Token interface. @@ -102,7 +107,7 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token err error auds authenticator.Audiences ) - webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { + webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error { result, err = w.tokenReview.Create(ctx, r, metav1.CreateOptions{}) return err }, webhook.DefaultShouldRetry) @@ -154,7 +159,7 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token // tokenReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, // and returns a TokenReviewInterface that uses that client. Note that the client submits TokenReview // requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. 
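[Illustrative sketch, not part of the vendored patch: constructing the webhook token authenticator with the new retry-backoff parameter, using DefaultRetryBackoff above. The kubeconfig path and the TokenReview version "v1" are invented for the example.]

// Illustrative only: build a WebhookTokenAuthenticator with default backoff,
// no implicit audiences, and no custom dialer.
func exampleTokenAuthenticator() (*WebhookTokenAuthenticator, error) {
	return New("/etc/kubernetes/authn-webhook.kubeconfig", "v1", nil, *DefaultRetryBackoff(), nil)
}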
-func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, customDial utilnet.DialFunc) (tokenReviewer, error) { +func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (tokenReviewer, error) { localScheme := runtime.NewScheme() if err := scheme.AddToScheme(localScheme); err != nil { return nil, err @@ -166,7 +171,7 @@ func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, c if err := localScheme.SetVersionPriority(groupVersions...); err != nil { return nil, err } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0, customDial) + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) if err != nil { return nil, err } @@ -177,7 +182,7 @@ func tokenReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, c if err := localScheme.SetVersionPriority(groupVersions...); err != nil { return nil, err } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0, customDial) + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index d7f4f631e..5c9f28ad4 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/util/webhook" @@ -40,11 +41,16 @@ import ( ) const ( - retryBackoff = 500 * time.Millisecond // The maximum length of requester-controlled attributes to allow caching. maxControlledAttrCacheSize = 10000 ) +// DefaultRetryBackoff returns the default backoff parameters for webhook retry. +func DefaultRetryBackoff() *wait.Backoff { + backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond) + return &backoff +} + // Ensure Webhook implements the authorizer.Authorizer interface. var _ authorizer.Authorizer = (*WebhookAuthorizer)(nil) @@ -57,12 +63,12 @@ type WebhookAuthorizer struct { responseCache *cache.LRUExpireCache authorizedTTL time.Duration unauthorizedTTL time.Duration - initialBackoff time.Duration + retryBackoff wait.Backoff decisionOnError authorizer.Decision } // NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client -func NewFromInterface(subjectAccessReview authorizationv1client.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL time.Duration) (*WebhookAuthorizer, error) { +func NewFromInterface(subjectAccessReview authorizationv1client.SubjectAccessReviewInterface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff) } @@ -85,8 +91,8 @@ func NewFromInterface(subjectAccessReview authorizationv1client.SubjectAccessRev // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. 
-func New(kubeConfigFile string, version string, authorizedTTL, unauthorizedTTL time.Duration, customDial utilnet.DialFunc) (*WebhookAuthorizer, error) { - subjectAccessReview, err := subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile, version, customDial) +func New(kubeConfigFile string, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*WebhookAuthorizer, error) { + subjectAccessReview, err := subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile, version, retryBackoff, customDial) if err != nil { return nil, err } @@ -94,13 +100,13 @@ func New(kubeConfigFile string, version string, authorizedTTL, unauthorizedTTL t } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL, initialBackoff time.Duration) (*WebhookAuthorizer, error) { +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { return &WebhookAuthorizer{ subjectAccessReview: subjectAccessReview, responseCache: cache.NewLRUExpireCache(8192), authorizedTTL: authorizedTTL, unauthorizedTTL: unauthorizedTTL, - initialBackoff: initialBackoff, + retryBackoff: retryBackoff, decisionOnError: authorizer.DecisionNoOpinion, }, nil } @@ -190,7 +196,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri result *authorizationv1.SubjectAccessReview err error ) - webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { + webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error { result, err = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{}) return err }, webhook.DefaultShouldRetry) @@ -246,7 +252,7 @@ func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.Ext // subjectAccessReviewInterfaceFromKubeconfig builds a client from the specified kubeconfig file, // and returns a SubjectAccessReviewInterface that uses that client. Note that the client submits SubjectAccessReview // requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. 
-func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, customDial utilnet.DialFunc) (subjectAccessReviewer, error) { +func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version string, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (subjectAccessReviewer, error) { localScheme := runtime.NewScheme() if err := scheme.AddToScheme(localScheme); err != nil { return nil, err @@ -258,7 +264,7 @@ func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version s if err := localScheme.SetVersionPriority(groupVersions...); err != nil { return nil, err } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0, customDial) + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) if err != nil { return nil, err } @@ -269,7 +275,7 @@ func subjectAccessReviewInterfaceFromKubeconfig(kubeConfigFile string, version s if err := localScheme.SetVersionPriority(groupVersions...); err != nil { return nil, err } - gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, 0, customDial) + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, kubeConfigFile, groupVersions, retryBackoff, customDial) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 6c8e87e23..57404e0b2 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -501,7 +501,7 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { } -// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. +// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} } diff --git a/vendor/k8s.io/client-go/informers/settings/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go similarity index 94% rename from vendor/k8s.io/client-go/informers/settings/interface.go rename to vendor/k8s.io/client-go/informers/apiserverinternal/interface.go index d91e49867..122c03099 100644 --- a/vendor/k8s.io/client-go/informers/settings/interface.go +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go @@ -16,11 +16,11 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package settings +package apiserverinternal import ( + v1alpha1 "k8s.io/client-go/informers/apiserverinternal/v1alpha1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha1 "k8s.io/client-go/informers/settings/v1alpha1" ) // Interface provides access to each of this group's versions. diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go similarity index 80% rename from vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go index 250220469..9778325c6 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go @@ -24,8 +24,8 @@ import ( // Interface provides access to all the informers in this group version. 
type Interface interface { - // PodPresets returns a PodPresetInformer. - PodPresets() PodPresetInformer + // StorageVersions returns a StorageVersionInformer. + StorageVersions() StorageVersionInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// PodPresets returns a PodPresetInformer. -func (v *version) PodPresets() PodPresetInformer { - return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// StorageVersions returns a StorageVersionInformer. +func (v *version) StorageVersions() StorageVersionInformer { + return &storageVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 000000000..34175b522 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// StorageVersionInformer provides access to a shared informer and lister for +// StorageVersions. +type StorageVersionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.StorageVersionLister +} + +type storageVersionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageVersionInformer constructs a new informer for StorageVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageVersionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageVersionInformer constructs a new informer for StorageVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.InternalV1alpha1().StorageVersions().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.InternalV1alpha1().StorageVersions().Watch(context.TODO(), options) + }, + }, + &apiserverinternalv1alpha1.StorageVersion{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageVersionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageVersionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) +} + +func (f *storageVersionInformer) Lister() v1alpha1.StorageVersionLister { + return v1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 5c17f81f9..c6e102699 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -27,6 +27,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" + apiserverinternal "k8s.io/client-go/informers/apiserverinternal" apps "k8s.io/client-go/informers/apps" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" @@ -43,7 +44,6 @@ import ( policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" scheduling "k8s.io/client-go/informers/scheduling" - settings "k8s.io/client-go/informers/settings" storage "k8s.io/client-go/informers/storage" kubernetes "k8s.io/client-go/kubernetes" cache "k8s.io/client-go/tools/cache" @@ -190,6 +190,7 @@ type SharedInformerFactory interface { WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Admissionregistration() admissionregistration.Interface + Internal() apiserverinternal.Interface Apps() apps.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface @@ -205,7 +206,6 @@ type SharedInformerFactory interface { Policy() policy.Interface Rbac() rbac.Interface Scheduling() scheduling.Interface - Settings() settings.Interface Storage() storage.Interface } @@ -213,6 +213,10 @@ func (f *sharedInformerFactory) Admissionregistration() admissionregistration.In return admissionregistration.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Internal() apiserverinternal.Interface { + return apiserverinternal.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } @@ -273,10 +277,6 @@ func (f *sharedInformerFactory) Scheduling() scheduling.Interface { return scheduling.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Settings() settings.Interface { - return settings.New(f, 
f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Storage() storage.Interface { return storage.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index 27e68efe8..b04ca59d3 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -20,6 +20,7 @@ package flowcontrol import ( v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -27,6 +28,8 @@ import ( type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go similarity index 51% rename from vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go index 8c10b16c8..13f4ff093 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go @@ -16,75 +16,74 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "context" time "time" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/settings/v1alpha1" + v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) -// PodPresetInformer provides access to a shared informer and lister for -// PodPresets. -type PodPresetInformer interface { +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. +type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PodPresetLister + Lister() v1beta1.FlowSchemaLister } -type podPresetInformer struct { +type flowSchemaInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string } -// NewPodPresetInformer constructs a new informer for PodPreset type. +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).List(context.TODO(), options) + return client.FlowcontrolV1beta1().FlowSchemas().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).Watch(context.TODO(), options) + return client.FlowcontrolV1beta1().FlowSchemas().Watch(context.TODO(), options) }, }, - &settingsv1alpha1.PodPreset{}, + &flowcontrolv1beta1.FlowSchema{}, resyncPeriod, indexers, ) } -func (f *podPresetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *podPresetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&settingsv1alpha1.PodPreset{}, f.defaultInformer) +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta1.FlowSchema{}, f.defaultInformer) } -func (f *podPresetInformer) Lister() v1alpha1.PodPresetLister { - return v1alpha1.NewPodPresetLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() v1beta1.FlowSchemaLister { + return v1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go new file mode 100644 index 000000000..50329bb0a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. + FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 000000000..fa4835906 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. 
+type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta1().PriorityLevelConfigurations().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(context.TODO(), options) + }, + }, + &flowcontrolv1beta1.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1beta1.PriorityLevelConfigurationLister { + return v1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index f67c64ac8..2bc451095 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/admissionregistration/v1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" @@ -43,8 +44,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" 
nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -54,7 +57,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -242,6 +244,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 + case flowcontrolv1beta1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().FlowSchemas().Informer()}, nil + case flowcontrolv1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().PriorityLevelConfigurations().Informer()}, nil + + // Group=internal.apiserver.k8s.io, Version=v1alpha1 + case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil + // Group=networking.k8s.io, Version=v1 case networkingv1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().Ingresses().Informer()}, nil @@ -256,6 +268,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil + // Group=node.k8s.io, Version=v1 + case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1().RuntimeClasses().Informer()}, nil + // Group=node.k8s.io, Version=v1alpha1 case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil @@ -312,10 +328,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case schedulingv1beta1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1beta1().PriorityClasses().Informer()}, nil - // Group=settings.k8s.io, Version=v1alpha1 - case settingsv1alpha1.SchemeGroupVersion.WithResource("podpresets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil - // Group=storage.k8s.io, Version=v1 case storagev1.SchemeGroupVersion.WithResource("csidrivers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIDrivers().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/k8s.io/client-go/informers/node/interface.go index 977369379..61ed5af76 100644 --- a/vendor/k8s.io/client-go/informers/node/interface.go +++ 
b/vendor/k8s.io/client-go/informers/node/interface.go @@ -20,12 +20,15 @@ package node import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/node/v1" v1alpha1 "k8s.io/client-go/informers/node/v1alpha1" v1beta1 "k8s.io/client-go/informers/node/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. @@ -43,6 +46,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/node/v1/interface.go b/vendor/k8s.io/client-go/informers/node/v1/interface.go new file mode 100644 index 000000000..913fec4ac --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RuntimeClasses returns a RuntimeClassInformer. +func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go new file mode 100644 index 000000000..293f4e2e2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + nodev1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/node/v1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1().RuntimeClasses().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1().RuntimeClasses().Watch(context.TODO(), options) + }, + }, + &nodev1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1.RuntimeClassLister { + return v1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 064c24d12..f0d54b6a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -24,6 +24,7 @@ import ( discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" @@ -48,8 +49,10 @@ import ( eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -59,7 +62,6 @@ import ( schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" @@ -71,6 +73,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + 
InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface @@ -95,8 +98,10 @@ type Interface interface { EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface @@ -106,7 +111,6 @@ type Interface interface { SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface - SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface @@ -118,6 +122,7 @@ type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client @@ -142,8 +147,10 @@ type Clientset struct { eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1 *nodev1.NodeV1Client nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client nodeV1beta1 *nodev1beta1.NodeV1beta1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -153,7 +160,6 @@ type Clientset struct { schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client @@ -169,6 +175,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. 
return c.admissionregistrationV1beta1 } +// InternalV1alpha1 retrieves the InternalV1alpha1Client +func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { + return c.internalV1alpha1 +} + // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return c.appsV1 @@ -289,6 +300,11 @@ func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha return c.flowcontrolV1alpha1 } +// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client +func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { + return c.flowcontrolV1beta1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -299,6 +315,11 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return c.networkingV1beta1 } +// NodeV1 retrieves the NodeV1Client +func (c *Clientset) NodeV1() nodev1.NodeV1Interface { + return c.nodeV1 +} + // NodeV1alpha1 retrieves the NodeV1alpha1Client func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return c.nodeV1alpha1 @@ -344,11 +365,6 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return c.schedulingV1 } -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -393,6 +409,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.internalV1alpha1, err = internalv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -489,6 +509,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -497,6 +521,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.nodeV1, err = nodev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -533,10 +561,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -563,6 +587,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.internalV1alpha1 = internalv1alpha1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) @@ -587,8 +612,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = 
extensionsv1beta1.NewForConfigOrDie(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1 = nodev1.NewForConfigOrDie(c) cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -598,7 +625,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) - cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) @@ -612,6 +638,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.internalV1alpha1 = internalv1alpha1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) @@ -636,8 +663,10 @@ func New(c rest.Interface) *Clientset { cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -647,7 +676,6 @@ func New(c rest.Interface) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) - cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 70c4ac6e4..7293844ca 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -28,6 +28,8 @@ import ( fakeadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake" + internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" + fakeinternalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" fakeappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1/fake" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -76,10 +78,14 @@ import ( fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + fakeflowcontrolv1beta1 
"k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" + fakenodev1 "k8s.io/client-go/kubernetes/typed/node/v1/fake" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" fakenodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake" nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" @@ -98,8 +104,6 @@ import ( fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" fakeschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" - fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" @@ -166,6 +170,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} } +// InternalV1alpha1 retrieves the InternalV1alpha1Client +func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { + return &fakeinternalv1alpha1.FakeInternalV1alpha1{Fake: &c.Fake} +} + // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} @@ -286,6 +295,11 @@ func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha return &fakeflowcontrolv1alpha1.FakeFlowcontrolV1alpha1{Fake: &c.Fake} } +// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client +func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { + return &fakeflowcontrolv1beta1.FakeFlowcontrolV1beta1{Fake: &c.Fake} +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} @@ -296,6 +310,11 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} } +// NodeV1 retrieves the NodeV1Client +func (c *Clientset) NodeV1() nodev1.NodeV1Interface { + return &fakenodev1.FakeNodeV1{Fake: &c.Fake} +} + // NodeV1alpha1 retrieves the NodeV1alpha1Client func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} @@ -341,11 +360,6 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return &fakeschedulingv1.FakeSchedulingV1{Fake: &c.Fake} } -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go 
b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 00e71589b..0e8ab29f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -21,6 +21,7 @@ package fake import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,8 +46,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -56,7 +59,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -73,6 +75,7 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, @@ -97,8 +100,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, + flowcontrolv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1.AddToScheme, nodev1alpha1.AddToScheme, nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, @@ -108,7 +113,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, - settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 2710bf2de..5601e20dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -21,6 +21,7 @@ package scheme import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,8 +46,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -56,7 +59,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 
"k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -73,6 +75,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, @@ -97,8 +100,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, + flowcontrolv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1.AddToScheme, nodev1alpha1.AddToScheme, nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, @@ -108,7 +113,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, - settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index 8d3a8d8e1..e43a9a368 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,27 +19,27 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type SettingsV1alpha1Interface interface { +type InternalV1alpha1Interface interface { RESTClient() rest.Interface - PodPresetsGetter + StorageVersionsGetter } -// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. -type SettingsV1alpha1Client struct { +// InternalV1alpha1Client is used to interact with features provided by the internal.apiserver.k8s.io group. +type InternalV1alpha1Client struct { restClient rest.Interface } -func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { - return newPodPresets(c, namespace) +func (c *InternalV1alpha1Client) StorageVersions() StorageVersionInterface { + return newStorageVersions(c) } -// NewForConfig creates a new SettingsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { +// NewForConfig creates a new InternalV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*InternalV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +48,12 @@ func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { if err != nil { return nil, err } - return &SettingsV1alpha1Client{client}, nil + return &InternalV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// NewForConfigOrDie creates a new InternalV1alpha1Client for the given config and // panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *InternalV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +61,9 @@ func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { return client } -// New creates a new SettingsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *SettingsV1alpha1Client { - return &SettingsV1alpha1Client{c} +// New creates a new InternalV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *InternalV1alpha1Client { + return &InternalV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { +func (c *InternalV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go similarity index 75% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go index a142edfed..0960a5e81 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go @@ -19,22 +19,22 @@ limitations under the License. package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + v1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeSettingsV1alpha1 struct { +type FakeInternalV1alpha1 struct { *testing.Fake } -func (c *FakeSettingsV1alpha1) PodPresets(namespace string) v1alpha1.PodPresetInterface { - return &FakePodPresets{c, namespace} +func (c *FakeInternalV1alpha1) StorageVersions() v1alpha1.StorageVersionInterface { + return &FakeStorageVersions{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeSettingsV1alpha1) RESTClient() rest.Interface { +func (c *FakeInternalV1alpha1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go new file mode 100644 index 000000000..d75049a40 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeStorageVersions implements StorageVersionInterface +type FakeStorageVersions struct { + Fake *FakeInternalV1alpha1 +} + +var storageversionsResource = schema.GroupVersionResource{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Resource: "storageversions"} + +var storageversionsKind = schema.GroupVersionKind{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Kind: "StorageVersion"} + +// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. +func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. +func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.StorageVersionList{ListMeta: obj.(*v1alpha1.StorageVersionList).ListMeta} + for _, item := range obj.(*v1alpha1.StorageVersionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageVersions. +func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts)) +} + +// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. +func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) + return err +} + +// Patch applies the patch and returns the patched storageVersion. +func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go similarity index 93% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go index 23d9f94d5..f2835e607 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type PodPresetExpansion interface{} +type StorageVersionExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 000000000..af5466b04 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StorageVersionsGetter has a method to return a StorageVersionInterface. +// A group's client should implement this interface. +type StorageVersionsGetter interface { + StorageVersions() StorageVersionInterface +} + +// StorageVersionInterface has methods to work with StorageVersion resources. 
+type StorageVersionInterface interface { + Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) + Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) + StorageVersionExpansion +} + +// storageVersions implements StorageVersionInterface +type storageVersions struct { + client rest.Interface +} + +// newStorageVersions returns a StorageVersions +func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { + return &storageVersions{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. +func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Get(). + Resource("storageversions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. +func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageVersionList{} + err = c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageVersions. +func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Post(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. 
+func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. +func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageversions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageversions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageVersion. +func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Patch(pt). + Resource("storageversions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go new file mode 100644 index 000000000..1bd58d088 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeFlowcontrolV1beta1 struct { + *testing.Fake +} + +func (c *FakeFlowcontrolV1beta1) FlowSchemas() v1beta1.FlowSchemaInterface { + return &FakeFlowSchemas{c} +} + +func (c *FakeFlowcontrolV1beta1) PriorityLevelConfigurations() v1beta1.PriorityLevelConfigurationInterface { + return &FakePriorityLevelConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFlowcontrolV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go new file mode 100644 index 000000000..7732b69dc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFlowSchemas implements FlowSchemaInterface +type FakeFlowSchemas struct { + Fake *FakeFlowcontrolV1beta1 +} + +var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "flowschemas"} + +var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.FlowSchemaList{ListMeta: obj.(*v1beta1.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1beta1.FlowSchemaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) + return err +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go new file mode 100644 index 000000000..f93a505d4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type FakePriorityLevelConfigurations struct { + Fake *FakeFlowcontrolV1beta1 +} + +var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "prioritylevelconfigurations"} + +var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta1.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1beta1.PriorityLevelConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
+func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go new file mode 100644 index 000000000..9a8ba560e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta1Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta1Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta1Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta1Client { + return &FlowcontrolV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go new file mode 100644 index 000000000..398f4f347 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. 
+type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. 
+func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go new file mode 100644 index 000000000..fc15f6935 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 000000000..88633c827 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. +type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. 
+func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). 
+ Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go new file mode 100644 index 000000000..dea10cbad --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/node/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNodeV1 struct { + *testing.Fake +} + +func (c *FakeNodeV1) RuntimeClasses() v1.RuntimeClassInterface { + return &FakeRuntimeClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNodeV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go new file mode 100644 index 000000000..461386f45 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + nodev1 "k8s.io/api/node/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRuntimeClasses implements RuntimeClassInterface +type FakeRuntimeClasses struct { + Fake *FakeNodeV1 +} + +var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1", Resource: "runtimeclasses"} + +var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1", Kind: "RuntimeClass"} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *nodev1.RuntimeClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &nodev1.RuntimeClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &nodev1.RuntimeClassList{ListMeta: obj.(*nodev1.RuntimeClassList).ListMeta} + for _, item := range obj.(*nodev1.RuntimeClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.CreateOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.UpdateOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + + _, err := c.Fake.Invokes(action, &nodev1.RuntimeClassList{}) + return err +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go new file mode 100644 index 000000000..e2c25926f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go new file mode 100644 index 000000000..7f0da811b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/node/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1Client struct { + restClient rest.Interface +} + +func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1Client for the given RESTClient. 
+func New(c rest.Interface) *NodeV1Client { + return &NodeV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go new file mode 100644 index 000000000..df8c1cafe --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -0,0 +1,168 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. 
+func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + Name(name). 
+ SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go deleted file mode 100644 index c8ecd09ce..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakePodPresets implements PodPresetInterface -type FakePodPresets struct { - Fake *FakeSettingsV1alpha1 - ns string -} - -var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", Version: "v1alpha1", Resource: "podpresets"} - -var podpresetsKind = schema.GroupVersionKind{Group: "settings.k8s.io", Version: "v1alpha1", Kind: "PodPreset"} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *FakePodPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *FakePodPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podpresetsResource, podpresetsKind, c.ns, opts), &v1alpha1.PodPresetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PodPresetList{ListMeta: obj.(*v1alpha1.PodPresetList).ListMeta} - for _, item := range obj.(*v1alpha1.PodPresetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *FakePodPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(podpresetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. 
-func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *FakePodPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *FakePodPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) - return err -} - -// Patch applies the patch and returns the patched podPreset. -func (c *FakePodPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go deleted file mode 100644 index aa1cb364e..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodPresetsGetter has a method to return a PodPresetInterface. -// A group's client should implement this interface. -type PodPresetsGetter interface { - PodPresets(namespace string) PodPresetInterface -} - -// PodPresetInterface has methods to work with PodPreset resources. 
-type PodPresetInterface interface { - Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error) - Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) - PodPresetExpansion -} - -// podPresets implements PodPresetInterface -type podPresets struct { - client rest.Interface - ns string -} - -// newPodPresets returns a PodPresets -func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { - return &podPresets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("podpresets"). - Name(podPreset.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go similarity index 69% rename from vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go index fba210343..ad860c7c9 100644 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go @@ -18,10 +18,6 @@ limitations under the License. package v1alpha1 -// PodPresetListerExpansion allows custom methods to be added to -// PodPresetLister. -type PodPresetListerExpansion interface{} - -// PodPresetNamespaceListerExpansion allows custom methods to be added to -// PodPresetNamespaceLister. -type PodPresetNamespaceListerExpansion interface{} +// StorageVersionListerExpansion allows custom methods to be added to +// StorageVersionLister. +type StorageVersionListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 000000000..9a6d74b2b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageVersionLister helps list StorageVersions. +// All objects returned here must be treated as read-only. +type StorageVersionLister interface { + // List lists all StorageVersions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) + // Get retrieves the StorageVersion from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.StorageVersion, error) + StorageVersionListerExpansion +} + +// storageVersionLister implements the StorageVersionLister interface. +type storageVersionLister struct { + indexer cache.Indexer +} + +// NewStorageVersionLister returns a new StorageVersionLister. +func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { + return &storageVersionLister{indexer: indexer} +} + +// List lists all StorageVersions in the indexer. +func (s *storageVersionLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.StorageVersion)) + }) + return ret, err +} + +// Get retrieves the StorageVersion from the index for a given name. +func (s *storageVersionLister) Get(name string) (*v1alpha1.StorageVersion, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("storageversion"), name) + } + return obj.(*v1alpha1.StorageVersion), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go new file mode 100644 index 000000000..c674e951e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. +type PriorityLevelConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go new file mode 100644 index 000000000..7927a8411 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +// All objects returned here must be treated as read-only. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1beta1.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("flowschema"), name) + } + return obj.(*v1beta1.FlowSchema), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 000000000..c94aaa4c1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +// All objects returned here must be treated as read-only. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. +func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta1.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1beta1.PriorityLevelConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go new file mode 100644 index 000000000..4f010b87c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go new file mode 100644 index 000000000..6e00cf1a5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/node/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +// All objects returned here must be treated as read-only. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. +type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. +func (s *runtimeClassLister) Get(name string) (*v1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("runtimeclass"), name) + } + return obj.(*v1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go deleted file mode 100644 index c21eb72e6..000000000 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodPresetLister helps list PodPresets. -// All objects returned here must be treated as read-only. -type PodPresetLister interface { - // List lists all PodPresets in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) - // PodPresets returns an object that can list and get PodPresets. - PodPresets(namespace string) PodPresetNamespaceLister - PodPresetListerExpansion -} - -// podPresetLister implements the PodPresetLister interface. -type podPresetLister struct { - indexer cache.Indexer -} - -// NewPodPresetLister returns a new PodPresetLister. 
-func NewPodPresetLister(indexer cache.Indexer) PodPresetLister { - return &podPresetLister{indexer: indexer} -} - -// List lists all PodPresets in the indexer. -func (s *podPresetLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodPreset)) - }) - return ret, err -} - -// PodPresets returns an object that can list and get PodPresets. -func (s *podPresetLister) PodPresets(namespace string) PodPresetNamespaceLister { - return podPresetNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PodPresetNamespaceLister helps list and get PodPresets. -// All objects returned here must be treated as read-only. -type PodPresetNamespaceLister interface { - // List lists all PodPresets in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) - // Get retrieves the PodPreset from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PodPreset, error) - PodPresetNamespaceListerExpansion -} - -// podPresetNamespaceLister implements the PodPresetNamespaceLister -// interface. -type podPresetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodPresets in the indexer for a given namespace. -func (s podPresetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodPreset)) - }) - return ret, err -} - -// Get retrieves the PodPreset from the indexer for a given namespace and name. -func (s podPresetNamespaceLister) Get(name string) (*v1alpha1.PodPreset, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("podpreset"), name) - } - return obj.(*v1alpha1.PodPreset), nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 6fb53cecf..c10899792 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -18,11 +18,12 @@ package clientauthentication import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta @@ -37,7 +38,7 @@ type ExecCredential struct { Status *ExecCredentialStatus } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -49,6 +50,13 @@ type ExecCredentialSpec struct { // interactive prompt. // +optional Interactive bool + + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. 
Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). + // +optional + Cluster *Cluster } // ExecCredentialStatus holds credentials for the transport to use. @@ -58,13 +66,13 @@ type ExecCredentialStatus struct { ExpirationTimestamp *metav1.Time // Token is a bearer token used by the client for request authentication. // +optional - Token string + Token string `datapolicy:"token"` // PEM-encoded client TLS certificate. // +optional ClientCertificateData string // PEM-encoded client TLS private key. // +optional - ClientKeyData string + ClientKeyData string `datapolicy:"secret-key"` } // Response defines metadata about a failed request, including HTTP status code and @@ -75,3 +83,56 @@ type Response struct { // Code is the HTTP status code returned by the server. Code int32 } + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. 
+ // +optional + Config runtime.Object +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go new file mode 100644 index 000000000..572e049f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // This conversion intentionally omits the Cluster field which is only supported in newer versions. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index c714e2457..1ff13c438 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -37,7 +37,7 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -61,11 +61,11 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. 
- ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` } // Response defines metadata about a failed request, including HTTP status code and diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index 461c20b29..b0e503af4 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -51,11 +51,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { @@ -76,6 +71,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } return nil } @@ -119,14 +119,10 @@ func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialS func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { out.Response = (*Response)(unsafe.Pointer(in.Response)) out.Interactive = in.Interactive + // WARNING: in.Cluster requires manual conversion: does not exist in peer-type return nil } -// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. 
-func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) -} - func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go index f543806ac..441b7c44b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -17,10 +17,12 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" - clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" ) func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return nil + // This conversion intentionally omits the Response and Interactive fields, which were only + // supported in v1alpha1. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index d6e267452..fabc6f65e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -18,17 +18,17 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` - // Spec holds information passed to the plugin by the transport. This contains - // request and runtime specific information, such as if the session is interactive. + // Spec holds information passed to the plugin by the transport. Spec ExecCredentialSpec `json:"spec,omitempty"` // Status is filled in by the plugin and holds the credentials that the transport @@ -37,9 +37,16 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. -type ExecCredentialSpec struct{} +type ExecCredentialSpec struct { + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). 
+ // +optional + Cluster *Cluster `json:"cluster,omitempty"` +} // ExecCredentialStatus holds credentials for the transport to use. // @@ -51,9 +58,62 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. - ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` +} + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string `json:"tls-server-name,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string `json:"proxy-url,omitempty"` + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. 
+ // +optional + Config runtime.RawExtension `json:"config,omitempty"` } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 0e533e465..90f7935fe 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -36,6 +36,16 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Cluster_To_v1beta1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) }); err != nil { @@ -69,6 +79,40 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function. +func Convert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + return autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in, out, s) +} + +func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_clientauthentication_Cluster_To_v1beta1_Cluster is an autogenerated conversion function. 
+func Convert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in, out, s) +} + func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -96,6 +140,15 @@ func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *c } func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(clientauthentication.Cluster) + if err := Convert_v1beta1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } @@ -107,6 +160,15 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { // WARNING: in.Response requires manual conversion: does not exist in peer-type // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + if err := Convert_clientauthentication_Cluster_To_v1beta1_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go index 736b8cf00..3a72ece0c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -24,11 +24,33 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in out.TypeMeta = in.TypeMeta - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status *out = new(ExecCredentialStatus) @@ -58,6 +80,11 @@ func (in *ExecCredential) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index c568a6fc8..045b07f5b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -24,6 +24,30 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in @@ -63,6 +87,11 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = new(Response) (*in).DeepCopyInto(*out) } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 627bb2de9..af21c4995 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -87,8 +87,15 @@ func newCache() *cache { var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} -func cacheKey(c *api.ExecConfig) string { - return spewConfig.Sprint(c) +func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { + key := struct { + conf *api.ExecConfig + cluster *clientauthentication.Cluster + }{ + conf: conf, + cluster: cluster, + } + return spewConfig.Sprint(key) } type cache struct { @@ -155,12 +162,12 @@ func (s *sometimes) Do(f func()) { } // GetAuthenticator returns an exec-based plugin for providing client credentials. 
-func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) { - return newAuthenticator(globalCache, config) +func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + return newAuthenticator(globalCache, config, cluster) } -func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) { - key := cacheKey(config) +func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + key := cacheKey(config, cluster) if a, ok := c.get(key); ok { return a, nil } @@ -171,9 +178,11 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) } a := &Authenticator{ - cmd: config.Command, - args: config.Args, - group: gv, + cmd: config.Command, + args: config.Args, + group: gv, + cluster: cluster, + provideClusterInfo: config.ProvideClusterInfo, installHint: config.InstallHint, sometimes: &sometimes{ @@ -200,10 +209,12 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) // The plugin input and output are defined by the API group client.authentication.k8s.io. type Authenticator struct { // Set by the config - cmd string - args []string - group schema.GroupVersion - env []string + cmd string + args []string + group schema.GroupVersion + env []string + cluster *clientauthentication.Cluster + provideClusterInfo bool // Used to avoid log spew by rate limiting install hint printing. We didn't do // this by interval based rate limiting alone since that way may have prevented @@ -230,8 +241,8 @@ type Authenticator struct { } type credentials struct { - token string - cert *tls.Certificate + token string `datapolicy:"token"` + cert *tls.Certificate `datapolicy:"secret-key"` } // UpdateTransportConfig updates the transport.Config to use credentials @@ -367,19 +378,16 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err Interactive: a.interactive, }, } + if a.provideClusterInfo { + cred.Spec.Cluster = a.cluster + } env := append(a.environ(), a.env...) - if a.group == v1alpha1.SchemeGroupVersion { - // Input spec disabled for beta due to lack of use. Possibly re-enable this later if - // someone wants it back. - // - // See: https://github.com/kubernetes/kubernetes/issues/61796 - data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) - if err != nil { - return fmt.Errorf("encode ExecCredentials: %v", err) - } - env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) + data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) + if err != nil { + return fmt.Errorf("encode ExecCredentials: %v", err) } + env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) stdout := &bytes.Buffer{} cmd := exec.Command(a.cmd, a.args...) diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 6e50eef51..3735750bb 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -65,12 +65,12 @@ type Config struct { // Server requires Basic authentication Username string - Password string + Password string `datapolicy:"password"` // Server requires Bearer authentication. This client will not attempt to use // refresh tokens for an OAuth2 flow. // TODO: demonstrate an OAuth2 compatible client. - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. 
@@ -125,6 +125,7 @@ type Config struct { // WarningHandler handles warnings in server responses. // If not set, the default warning handler is used. + // See documentation for SetDefaultWarningHandler() for details. WarningHandler WarningHandler // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. @@ -133,7 +134,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -159,6 +160,15 @@ func (sanitizedAuthConfigPersister) String() string { return "rest.AuthProviderConfigPersister(--- REDACTED ---)" } +type sanitizedObject struct{ runtime.Object } + +func (sanitizedObject) GoString() string { + return "runtime.Object(--- REDACTED ---)" +} +func (sanitizedObject) String() string { + return "runtime.Object(--- REDACTED ---)" +} + // GoString implements fmt.GoStringer and sanitizes sensitive fields of Config // to prevent accidental leaking via logs. func (c *Config) GoString() string { @@ -182,7 +192,9 @@ func (c *Config) String() string { if cc.AuthConfigPersister != nil { cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} } - + if cc.ExecProvider != nil && cc.ExecProvider.Config != nil { + cc.ExecProvider.Config = sanitizedObject{Object: cc.ExecProvider.Config} + } return fmt.Sprintf("%#v", cc) } @@ -203,7 +215,7 @@ type TLSClientConfig struct { // Server should be accessed without verifying the TLS certificate. For testing only. Insecure bool // ServerName is passed to the server for SNI and is used in the client to check server - // ceritificates against. If ServerName is empty, the hostname used to contact the + // certificates against. If ServerName is empty, the hostname used to contact the // server is used. ServerName string @@ -219,7 +231,7 @@ type TLSClientConfig struct { CertData []byte // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). // KeyData takes precedence over KeyFile - KeyData []byte + KeyData []byte `datapolicy:"security-key"` // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte @@ -587,7 +599,7 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { - return &Config{ + c := &Config{ Host: config.Host, APIPath: config.APIPath, ContentConfig: config.ContentConfig, @@ -626,4 +638,8 @@ func CopyConfig(config *Config) *Config { Dial: config.Dial, Proxy: config.Proxy, } + if config.ExecProvider != nil && config.ExecProvider.Config != nil { + c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject() + } + return c } diff --git a/vendor/k8s.io/client-go/rest/exec.go b/vendor/k8s.io/client-go/rest/exec.go new file mode 100644 index 000000000..5f3b43c55 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/exec.go @@ -0,0 +1,85 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "net/url" + + "k8s.io/client-go/pkg/apis/clientauthentication" + clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication" +) + +// This file contains Config logic related to exec credential plugins. + +// ConfigToExecCluster creates a clientauthenticationapi.Cluster with the corresponding fields from +// the provided Config. +func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, error) { + caData, err := dataFromSliceOrFile(config.CAData, config.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to load CA bundle for execProvider: %v", err) + } + + var proxyURL string + if config.Proxy != nil { + req, err := http.NewRequest("", config.Host, nil) + if err != nil { + return nil, fmt.Errorf("failed to create proxy URL request for execProvider: %w", err) + } + url, err := config.Proxy(req) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL for execProvider: %w", err) + } + if url != nil { + proxyURL = url.String() + } + } + + return &clientauthentication.Cluster{ + Server: config.Host, + TLSServerName: config.ServerName, + InsecureSkipTLSVerify: config.Insecure, + CertificateAuthorityData: caData, + ProxyURL: proxyURL, + Config: config.ExecProvider.Config, + }, nil +} + +// ExecClusterToConfig creates a Config with the corresponding fields from the provided +// clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have +// any authentication-related fields set). 
+func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) { + var proxy func(*http.Request) (*url.URL, error) + if cluster.ProxyURL != "" { + proxyURL, err := url.Parse(cluster.ProxyURL) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy URL: %w", err) + } + proxy = http.ProxyURL(proxyURL) + } + + return &Config{ + Host: cluster.Server, + TLSClientConfig: TLSClientConfig{ + Insecure: cluster.InsecureSkipTLSVerify, + ServerName: cluster.TLSServerName, + CAData: cluster.CertificateAuthorityData, + }, + Proxy: proxy, + }, nil +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0ed7def73..1ccc0dafe 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -511,13 +511,23 @@ func (r Request) finalURLTemplate() url.URL { } r.params = newParams url := r.URL() - segments := strings.Split(r.URL().Path, "/") + + segments := strings.Split(url.Path, "/") groupIndex := 0 index := 0 - if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) { - groupIndex += len(strings.Split(r.c.base.Path, "/")) + trimmedBasePath := "" + if url != nil && r.c.base != nil && strings.Contains(url.Path, r.c.base.Path) { + p := strings.TrimPrefix(url.Path, r.c.base.Path) + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + // store the base path that we have trimmed so we can append it + // before returning the URL + trimmedBasePath = r.c.base.Path + segments = strings.Split(p, "/") + groupIndex = 1 } - if groupIndex >= len(segments) { + if len(segments) <= 2 { return *url } @@ -563,7 +573,7 @@ func (r Request) finalURLTemplate() url.URL { segments[index+3] = "{name}" } } - url.Path = path.Join(segments...) + url.Path = path.Join(trimmedBasePath, path.Join(segments...)) return *url } @@ -638,7 +648,7 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) { return -1, false } -// Infof will write a log message at each logLevel specified by the reciever's throttleSettings +// Infof will write a log message at each logLevel specified by the receiver's throttleSettings // as long as it hasn't written a log message more recently than minLogInterval. 
func (b *throttledLogger) Infof(message string, args ...interface{}) { if logLevel, ok := b.attemptToLog(); ok { diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 450edc6ed..87792750a 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -21,6 +21,7 @@ import ( "errors" "net/http" + "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/plugin/pkg/client/auth/exec" "k8s.io/client-go/transport" ) @@ -94,7 +95,15 @@ func (c *Config) TransportConfig() (*transport.Config, error) { } if c.ExecProvider != nil { - provider, err := exec.GetAuthenticator(c.ExecProvider) + var cluster *clientauthentication.Cluster + if c.ExecProvider.ProvideClusterInfo { + var err error + cluster, err = ConfigToExecCluster(c) + if err != nil { + return nil, err + } + } + provider, err := exec.GetAuthenticator(c.ExecProvider, cluster) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go index 45c1c3b2c..18476f5ff 100644 --- a/vendor/k8s.io/client-go/rest/warnings.go +++ b/vendor/k8s.io/client-go/rest/warnings.go @@ -38,8 +38,11 @@ var ( defaultWarningHandlerLock sync.RWMutex ) -// SetDefaultWarningHandler sets the default handler client uses when warning headers are encountered. -// By default, warnings are printed to stderr. +// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered. +// By default, warnings are logged. Several built-in implementations are provided: +// - NoWarnings suppresses warnings. +// - WarningLogger logs warnings. +// - NewWarningWriter() outputs warnings to the provided writer. func SetDefaultWarningHandler(l WarningHandler) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index c34172677..4c24f7997 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -75,11 +75,11 @@ import ( // to be read/written from a file as a JSON object. type Info struct { User string - Password string + Password string `datapolicy:"password"` CAFile string CertFile string KeyFile string - BearerToken string + BearerToken string `datapolicy:"token"` Insecure *bool } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 7bbe63542..9d0a18771 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -38,6 +38,5 @@ reviewers: - resouer - jessfraz - mfojtik -- mqliang - sdminonne - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 3ad9b53bb..684d1a8d3 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -72,6 +72,9 @@ type Config struct { // Called whenever the ListAndWatch drops the connection with an error. WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. 
+ WatchListPageSize int64 } // ShouldResyncFunc is a type of function that indicates if a reflector should perform a @@ -134,6 +137,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.FullResyncPeriod, ) r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 2774f4f21..148b478d5 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -145,7 +145,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they // will always return an object of type Deltas. List() returns -// the newest objects currently in the FIFO. +// the newest object from each accumulator in the FIFO. // // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities // to list Store keys and to get objects by Store key. The objects in @@ -161,12 +161,13 @@ type DeltaFIFO struct { lock sync.RWMutex cond sync.Cond - // `items` maps keys to Deltas. - // `queue` maintains FIFO order of keys for consumption in Pop(). - // We maintain the property that keys in the `items` and `queue` are - // strictly 1:1 mapping, and that all Deltas in `items` should have - // at least one Delta. + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. queue []string // populated is true if the first batch of items inserted by Replace() has been populated @@ -376,8 +377,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err if err != nil { return KeyError{obj, err} } - - newDeltas := append(f.items[id], Delta{actionType, obj}) + oldDeltas := f.items[id] + newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) if len(newDeltas) > 0 { @@ -389,10 +390,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err } else { // This never happens, because dedupDeltas never returns an empty list // when given a non-empty list (as it is here). - // But if somehow it ever does return an empty list, then - // We need to remove this from our map (extra items in the queue are - // ignored if they are not in the map). - delete(f.items, id) + // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) } return nil } @@ -459,7 +464,7 @@ func (f *DeltaFIFO) IsClosed() bool { return f.closed } -// Pop blocks until an item is added to the queue, and then returns it. If +// Pop blocks until the queue has some items, and then returns one. 
If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it // is returned, so if you don't successfully process it, you need to add it back @@ -494,7 +499,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } item, ok := f.items[id] if !ok { - // Item may have been deleted subsequently. + // This should never happen + klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id) continue } delete(f.items, id) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index e995abe25..360d7304b 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -69,6 +69,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager + // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch. + initConnBackoffManager wait.BackoffManager resyncPeriod time.Duration // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked @@ -99,6 +101,15 @@ type Reflector struct { watchErrorHandler WatchErrorHandler } +// ResourceVersionUpdater is an interface that allows store implementation to +// track the current resource version of the reflector. This is especially +// important if storage bookmarks are enabled. +type ResourceVersionUpdater interface { + // UpdateResourceVersion is called each time current resource version of the reflector + // is updated. + UpdateResourceVersion(resourceVersion string) +} + // The WatchErrorHandler is called whenever ListAndWatch drops the // connection with an error. After calling this handler, the informer // will backoff and retry. @@ -166,10 +177,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), } r.setExpectedType(expectedType) return r @@ -404,9 +416,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // If this is "connection refused" error, it means that most likely apiserver is not responsive. // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. - // If that's the case wait and resend watch request. + // If that's the case begin exponentially backing off and resend watch request. 
if utilnet.IsConnectionRefused(err) { - time.Sleep(time.Second) + <-r.initConnBackoffManager.Backoff().C() continue } return err @@ -504,6 +516,9 @@ loop: } *resourceVersion = newResourceVersion r.setLastSyncResourceVersion(newResourceVersion) + if rvu, ok := r.store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(newResourceVersion) + } eventCount++ } } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index f48989763..3a3f538a1 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -485,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 829424dcf..24f469236 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -114,10 +114,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -135,7 +135,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -215,6 +215,36 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. 
InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. + ProvideClusterInfo bool `json:"provideClusterInfo"` + + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's extensions[exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. + // +k8s:conversion-gen=false + Config runtime.Object } var _ fmt.Stringer = new(ExecConfig) @@ -237,7 +267,11 @@ func (c ExecConfig) String() string { if len(c.Env) > 0 { env = "[]ExecEnvVar{--- REDACTED ---}" } - return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) + config := "runtime.Object(nil)" + if c.Config != nil { + config = "runtime.Object(--- REDACTED ---)" + } + return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config) } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 0395f860f..8c29b39c1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -104,10 +104,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -125,7 +125,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. 
// +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -214,6 +214,13 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. + ProvideClusterInfo bool `json:"provideClusterInfo"` } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index bf9eaeca3..26e96529d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -171,7 +171,15 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(api.ExecConfig) + if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -197,7 +205,15 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -359,6 +375,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo return nil } @@ -373,6 +390,8 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo + // INFO: in.Config opted out of conversion generation return nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index 3240a7a98..a04de6260 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ 
b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -267,6 +267,9 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { *out = make([]ExecEnvVar, len(*in)) copy(*out, *in) } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } return } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 690afce0c..9e1cd64a0 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -34,6 +34,11 @@ import ( "github.com/imdario/mergo" ) +const ( + // clusterExtensionKey is reserved in the cluster extensions list for exec plugin config. + clusterExtensionKey = "client.authentication.k8s.io/exec" +) + var ( // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields // DEPRECATED will be replaced @@ -72,7 +77,7 @@ type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderC type promptedCredentials struct { username string - password string + password string `datapolicy:"password"` } // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information @@ -189,7 +194,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { authInfoName, _ := config.getAuthInfoName() persister = PersisterForUser(config.configAccess, authInfoName) } - userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo) if err != nil { return nil, err } @@ -232,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file // 4. 
if there is not enough information to identify the user, prompt if possible -func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { +func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { mergedConfig := &restclient.Config{} // blindly overwrite existing values based on precedence @@ -271,6 +276,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if configAuthInfo.Exec != nil { mergedConfig.ExecProvider = configAuthInfo.Exec mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint) + mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey] } // if there still isn't enough information to authenticate the user, try prompting diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 5f1660bf9..a7eae66bf 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -83,10 +83,13 @@ func (o *PathOptions) GetEnvVarFiles() []string { } func (o *PathOptions) GetLoadingPrecedence() []string { + if o.IsExplicitFile() { + return []string{o.GetExplicitFile()} + } + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { return envVarFiles } - return []string{o.GlobalFile} } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index b0672291a..901ed50c4 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -304,6 +304,10 @@ func (rules *ClientConfigLoadingRules) Migrate() error { // GetLoadingPrecedence implements ConfigAccess func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + if len(rules.ExplicitPath) > 0 { + return []string{rules.ExplicitPath} + } + return rules.Precedence } diff --git a/vendor/k8s.io/client-go/tools/events/OWNERS b/vendor/k8s.io/client-go/tools/events/OWNERS new file mode 100644 index 000000000..fbd0a6a01 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- yastij +- wojtek-t +reviewers: +- yastij +- wojtek-t diff --git a/vendor/k8s.io/client-go/tools/events/doc.go b/vendor/k8s.io/client-go/tools/events/doc.go new file mode 100644 index 000000000..795582b02 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package events has all client logic for recording and reporting +// "k8s.io/api/events/v1beta1".Event events. 
+package events // import "k8s.io/client-go/tools/events" diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go new file mode 100644 index 000000000..bde888ee9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -0,0 +1,384 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedv1core "k8s.io/client-go/kubernetes/typed/core/v1" + typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/record/util" + "k8s.io/klog/v2" +) + +const ( + maxTriesPerEvent = 12 + finishTime = 6 * time.Minute + refreshTime = 30 * time.Minute + maxQueuedEvents = 1000 +) + +var defaultSleepDuration = 10 * time.Second + +// TODO: validate impact of copying and investigate hashing +type eventKey struct { + action string + reason string + reportingController string + regarding corev1.ObjectReference + related corev1.ObjectReference +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + mu sync.Mutex + eventCache map[eventKey]*eventsv1.Event + sleepDuration time.Duration + sink EventSink +} + +// EventSinkImpl wraps EventsV1Interface to implement EventSink. +// TODO: this makes it easier for testing purpose and masks the logic of performing API calls. +// Note that rollbacking to raw clientset should also be transparent. +type EventSinkImpl struct { + Interface typedeventsv1.EventsV1Interface +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't create an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{}) +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. 
+func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't update an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{}) +} + +// Patch applies the patch and returns the patched event, and an error, if there is any. +func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { + if event.Namespace == "" { + return nil, fmt.Errorf("can't patch an event with empty namespace") + } + return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) +} + +// NewBroadcaster Creates a new event broadcaster. +func NewBroadcaster(sink EventSink) EventBroadcaster { + return newBroadcaster(sink, defaultSleepDuration, map[eventKey]*eventsv1.Event{}) +} + +// NewBroadcasterForTest Creates a new event broadcaster for test purposes. +func newBroadcaster(sink EventSink, sleepDuration time.Duration, eventCache map[eventKey]*eventsv1.Event) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + eventCache: eventCache, + sleepDuration: sleepDuration, + sink: sink, + } +} + +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + +// refreshExistingEventSeries refresh events TTL +func (e *eventBroadcasterImpl) refreshExistingEventSeries() { + // TODO: Investigate whether lock contention won't be a problem + e.mu.Lock() + defer e.mu.Unlock() + for isomorphicKey, event := range e.eventCache { + if event.Series != nil { + if recordedEvent, retry := recordEvent(e.sink, event); !retry { + if recordedEvent != nil { + e.eventCache[isomorphicKey] = recordedEvent + } + } + } + } +} + +// finishSeries checks if a series has ended and either: +// - write final count to the apiserver +// - delete a singleton event (i.e. series field is nil) from the cache +func (e *eventBroadcasterImpl) finishSeries() { + // TODO: Investigate whether lock contention won't be a problem + e.mu.Lock() + defer e.mu.Unlock() + for isomorphicKey, event := range e.eventCache { + eventSerie := event.Series + if eventSerie != nil { + if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) { + if _, retry := recordEvent(e.sink, event); !retry { + delete(e.eventCache, isomorphicKey) + } + } + } else if event.EventTime.Time.Before(time.Now().Add(-finishTime)) { + delete(e.eventCache, isomorphicKey) + } + } +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder { + hostname, _ := os.Hostname() + reportingInstance := reportingController + "-" + hostname + return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}} +} + +func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) { + // Make a copy before modification, because there could be multiple listeners. 
+ eventCopy := event.DeepCopy() + go func() { + evToRecord := func() *eventsv1.Event { + e.mu.Lock() + defer e.mu.Unlock() + eventKey := getKey(eventCopy) + isomorphicEvent, isIsomorphic := e.eventCache[eventKey] + if isIsomorphic { + if isomorphicEvent.Series != nil { + isomorphicEvent.Series.Count++ + isomorphicEvent.Series.LastObservedTime = metav1.MicroTime{Time: clock.Now()} + return nil + } + isomorphicEvent.Series = &eventsv1.EventSeries{ + Count: 1, + LastObservedTime: metav1.MicroTime{Time: clock.Now()}, + } + return isomorphicEvent + } + e.eventCache[eventKey] = eventCopy + return eventCopy + }() + if evToRecord != nil { + recordedEvent := e.attemptRecording(evToRecord) + if recordedEvent != nil { + recordedEventKey := getKey(recordedEvent) + e.mu.Lock() + defer e.mu.Unlock() + e.eventCache[recordedEventKey] = recordedEvent + } + } + }() +} + +func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event { + tries := 0 + for { + if recordedEvent, retry := recordEvent(e.sink, event); !retry { + return recordedEvent + } + tries++ + if tries >= maxTriesPerEvent { + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + return nil + } + // Randomize sleep so that various clients won't all be + // synced up if the master goes down. + time.Sleep(wait.Jitter(e.sleepDuration, 0.25)) + } +} + +func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { + var newEvent *eventsv1.Event + var err error + isEventSeries := event.Series != nil + if isEventSeries { + patch, patchBytesErr := createPatchBytesForSeries(event) + if patchBytesErr != nil { + klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr) + return nil, false + } + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + return newEvent, false + } + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return nil, false + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return nil, false + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. 
+ } + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return nil, true +} + +func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) { + oldEvent := event.DeepCopy() + oldEvent.Series = nil + oldData, err := json.Marshal(oldEvent) + if err != nil { + return nil, err + } + newData, err := json.Marshal(event) + if err != nil { + return nil, err + } + return strategicpatch.CreateTwoWayMergePatch(oldData, newData, eventsv1.Event{}) +} + +func getKey(event *eventsv1.Event) eventKey { + key := eventKey{ + action: event.Action, + reason: event.Reason, + reportingController: event.ReportingController, + regarding: event.Regarding, + } + if event.Related != nil { + key.related = *event.Related + } + return key +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value is used to stop recording +func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() { + watcher := e.Watch() + go func() { + defer utilruntime.HandleCrash() + for { + watchEvent, ok := <-watcher.ResultChan() + if !ok { + return + } + eventHandler(watchEvent.Object) + } + }() + return watcher.Stop +} + +func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) { + eventHandler := func(obj runtime.Object) { + event, ok := obj.(*eventsv1.Event) + if !ok { + klog.Errorf("unexpected type, expected eventsv1.Event") + return + } + e.recordToSink(event, clock.RealClock{}) + } + stopWatcher := e.StartEventWatcher(eventHandler) + go func() { + <-stopCh + stopWatcher() + }() +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) { + go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh) + go wait.Until(e.finishSeries, finishTime, stopCh) + e.startRecordingEvents(stopCh) +} + +type eventBroadcasterAdapterImpl struct { + coreClient typedv1core.EventsGetter + coreBroadcaster record.EventBroadcaster + eventsv1Client typedeventsv1.EventsV1Interface + eventsv1Broadcaster EventBroadcaster +} + +// NewEventBroadcasterAdapter creates a wrapper around new and legacy broadcasters to simplify +// migration of individual components to the new Event API. +func NewEventBroadcasterAdapter(client clientset.Interface) EventBroadcasterAdapter { + eventClient := &eventBroadcasterAdapterImpl{} + if _, err := client.Discovery().ServerResourcesForGroupVersion(eventsv1.SchemeGroupVersion.String()); err == nil { + eventClient.eventsv1Client = client.EventsV1() + eventClient.eventsv1Broadcaster = NewBroadcaster(&EventSinkImpl{Interface: eventClient.eventsv1Client}) + } + // Even though there can soon exist cases when coreBroadcaster won't really be needed, + // we create it unconditionally because its overhead is minor and will simplify using usage + // patterns of this library in all components. + eventClient.coreClient = client.CoreV1() + eventClient.coreBroadcaster = record.NewBroadcaster() + return eventClient +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. 
+func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{}) { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + e.eventsv1Broadcaster.StartRecordingToSink(stopCh) + } + if e.coreBroadcaster != nil && e.coreClient != nil { + e.coreBroadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: e.coreClient.Events("")}) + } +} + +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) + } + return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) +} + +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder { + return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) +} + +func (e *eventBroadcasterAdapterImpl) Shutdown() { + if e.coreBroadcaster != nil { + e.coreBroadcaster.Shutdown() + } + if e.eventsv1Broadcaster != nil { + e.eventsv1Broadcaster.Shutdown() + } +} diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go new file mode 100644 index 000000000..2837cc160 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/record/util" + "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" +) + +type recorderImpl struct { + scheme *runtime.Scheme + reportingController string + reportingInstance string + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + timestamp := metav1.MicroTime{time.Now()} + message := fmt.Sprintf(note, args...) + refRegarding, err := reference.GetReference(recorder.scheme, regarding) + if err != nil { + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) + return + } + refRelated, err := reference.GetReference(recorder.scheme, related) + if err != nil { + klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + } + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action) + go func() { + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func (recorder *recorderImpl) makeEvent(refRegarding *v1.ObjectReference, refRelated *v1.ObjectReference, timestamp metav1.MicroTime, eventtype, reason, message string, reportingController string, reportingInstance string, action string) *eventsv1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := refRegarding.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &eventsv1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", refRegarding.Name, t.UnixNano()), + Namespace: namespace, + }, + EventTime: timestamp, + Series: nil, + ReportingController: reportingController, + ReportingInstance: reportingInstance, + Action: action, + Reason: reason, + Regarding: *refRegarding, + Related: refRelated, + Note: message, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/events/fake.go b/vendor/k8s.io/client-go/tools/events/fake.go new file mode 100644 index 000000000..d572e0d3e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +// Eventf emits an event +func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+note, args...) + } +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go new file mode 100644 index 000000000..f1a523caa --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + eventsv1 "k8s.io/api/events/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Eventf constructs an event from the given information and puts it in the queue for sending. + // 'regarding' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). + // 'note' is intended to be human readable. + Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSink(stopCh <-chan struct{}) + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder + + // StartEventWatcher enables you to watch for emitted events without usage + // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests). + // NOTE: events received on your eventHandler should be copied before being used. + // TODO: figure out if this can be removed. + StartEventWatcher(eventHandler func(event runtime.Object)) func() + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventSink knows how to store events (client-go implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// client-go's REST client. +type EventSink interface { + Create(event *eventsv1.Event) (*eventsv1.Event, error) + Update(event *eventsv1.Event) (*eventsv1.Event, error) + Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) +} + +// EventBroadcasterAdapter is a auxiliary interface to simplify migration to +// the new events API. It is a wrapper around new and legacy broadcasters +// that smartly chooses which one to use. 
+// +// Deprecated: This interface will be removed once migration is completed. +type EventBroadcasterAdapter interface { + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSink(stopCh <-chan struct{}) + + // NewRecorder creates a new Event Recorder with specified name. + NewRecorder(name string) EventRecorder + + // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. + DeprecatedNewLegacyRecorder(name string) record.EventRecorder + + // Shutdown shuts down the broadcaster. + Shutdown() +} diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 000000000..792f356b0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 000000000..33d5fe78e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting +// "k8s.io/api/core/v1".Event events. +package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 000000000..48ef45bb5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,380 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record/util" + ref "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// CorrelatorOptions allows you to change the default of the EventSourceObjectSpamFilter +// and EventAggregator in EventCorrelator +type CorrelatorOptions struct { + // The lru cache size used for both EventSourceObjectSpamFilter and the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the LRUCacheSize has to be greater than 0. + LRUCacheSize int + // The burst size used by the token bucket rate filtering in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the BurstSize has to be greater than 0. + BurstSize int + // The fill rate of the token bucket in queries per second in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the QPS has to be greater than 0. + QPS float32 + // The func used by the EventAggregator to group event keys for aggregation + // If not specified (zero value), EventAggregatorByReasonFunc will be used + KeyFunc EventAggregatorKeyFunc + // The func used by the EventAggregator to produced aggregated message + // If not specified (zero value), EventAggregatorByReasonMessageFunc will be used + MessageFunc EventAggregatorMessageFunc + // The number of events in an interval before aggregation happens by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxEvents has to be greater than 0 + MaxEvents int + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it is considered new by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxIntervalInSeconds has to be greater than 0 + MaxIntervalInSeconds int + // The clock used by the EventAggregator to allow for testing + // If not specified (zero value), clock.RealClock{} will be used + Clock clock.Clock +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. 
New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured + // logging function. The return value can be ignored or used to stop recording, if desired. + StartStructuredLogging(verbosity klog.Level) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder +// implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. +type EventRecorderAdapter struct { + recorder EventRecorder +} + +// NewEventRecorderAdapter returns an adapter implementing the new +// "k8s.io/client-go/tools/events".EventRecorder interface. +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { + return &EventRecorderAdapter{ + recorder: recorder, + } +} + +// Eventf is a wrapper around v1 Eventf +func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + a.recorder.Eventf(regarding, eventtype, reason, note, args...) +} + +// Creates a new event broadcaster. 
+func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + } +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: sleepDuration, + } +} + +func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + options: options, + } +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration + options CorrelatorOptions +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + eventCorrelator := NewEventCorrelatorWithOptions(e.options) + return e.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, e.sleepDuration) + }) +} + +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. + if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. +func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. 
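	// (Summary of the logic below, for comparison with the events/v1 implementation
	// earlier in this patch: the boolean result is the retry signal, where false
	// means "retry" and true means "recorded or deliberately dropped". The caller,
	// recordToSink above, retries up to maxTriesPerEvent times and randomizes only
	// the first sleep so clients do not stay synchronized after an apiserver outage.)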
+ switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return e.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface { + return e.StartEventWatcher( + func(e *v1.Event) { + klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := e.Watch() + go func() { + defer utilruntime.HandleCrash() + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 000000000..9374612f2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,511 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. 
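	// Back-of-the-envelope reading of the spam defaults above: with burst 25 and a
	// refill rate of 1/300 tokens per second, a single source+object pair can emit
	// 25 events back to back and is then limited to roughly one additional event
	// every 300 seconds (5 minutes), i.e. about 12 events per hour.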
+) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. +type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. +func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { + var record spamRecord + + // controls our cached information about this event (source+object) + eventKey := getSpamKey(event) + + // do we have a record of similar events in our cache? 
+ f.Lock() + defer f.Unlock() + value, found := f.cache.Get(eventKey) + if found { + record = value.(spamRecord) + } + + // verify we have a rate limiter for this record + if record.rateLimiter == nil { + record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) + } + + // ensure we have available rate + filter := !record.rateLimiter.TryAccept() + + // update the cache + f.cache.Add(eventKey, record) + + return filter +} + +// EventAggregatorKeyFunc is responsible for grouping events for aggregation +// It returns a tuple of the following: +// aggregateKey - key the identifies the aggregate group to bucket this event +// localKey - key that makes this event in the local group +type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) + +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type, +// event.Reason, event.ReportingController and event.ReportingInstance +func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.ReportingController, + event.ReportingInstance, + }, + ""), event.Message +} + +// EventAggregatorMessageFunc is responsible for producing an aggregation message +type EventAggregatorMessageFunc func(event *v1.Event) string + +// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message +func EventAggregatorByReasonMessageFunc(event *v1.Event) string { + return "(combined from similar events): " + event.Message +} + +// EventAggregator identifies similar events and aggregates them into a single event +type EventAggregator struct { + sync.RWMutex + + // The cache that manages aggregation state + cache *lru.Cache + + // The function that groups events for aggregation + keyFunc EventAggregatorKeyFunc + + // The function that generates a message for an aggregate event + messageFunc EventAggregatorMessageFunc + + // The maximum number of events in the specified interval before aggregation occurs + maxEvents uint + + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new + maxIntervalInSeconds uint + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventAggregator returns a new instance of an EventAggregator +func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, + maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { + return &EventAggregator{ + cache: lru.New(lruCacheSize), + keyFunc: keyFunc, + messageFunc: messageFunc, + maxEvents: uint(maxEvents), + maxIntervalInSeconds: uint(maxIntervalInSeconds), + clock: clock, + } +} + +// aggregateRecord holds data used to perform aggregation decisions +type aggregateRecord struct { + // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate + // if the size of this set exceeds the max, we know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp metav1.Time +} + +// EventAggregate checks if a similar event has been seen according to the +// aggregation configuration (max 
events, max interval, etc) and returns: +// +// - The (potentially modified) event that should be created +// - The cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { + now := metav1.NewTime(e.clock.Now()) + var record aggregateRecord + // eventKey is the full cache key for this event + eventKey := getEventKey(newEvent) + // aggregateKey is for the aggregate event, if one is needed. + aggregateKey, localKey := e.keyFunc(newEvent) + + // Do we have a record of similar events in our cache? + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // Is the previous record too old? If so, make a fresh one. Note: if we didn't + // find a similar record, its lastTimestamp will be the zero value, so we + // create a new one in that case. + maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + + // Write the new event into the aggregation record and put it on the cache + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + // If we are not yet over the threshold for unique events, don't correlate them + if uint(record.localKeys.Len()) < e.maxEvents { + return newEvent, eventKey + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event, and return the aggregateKey as the cache key + // (so that it can be overwritten.) + eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, aggregateKey +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count uint + + // The time at which the event was first recorded. 
+ firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
+type EventCorrelator struct { + // the function to filter the event + filterFunc EventFilterFunc + // the object that performs event aggregation + aggregator *EventAggregator + // the object that observes events as they come through + logger *eventLogger +} + +// EventCorrelateResult is the result of a Correlate +type EventCorrelateResult struct { + // the event after correlation + Event *v1.Event + // if provided, perform a strategic patch when updating the record on the server + Patch []byte + // if true, do no further processing of the event + Skip bool +} + +// NewEventCorrelator returns an EventCorrelator configured with default values. +// +// The EventCorrelator is responsible for event filtering, aggregating, and counting +// prior to interacting with the API server to record the event. +// +// The default behavior is as follows: +// * Aggregation is performed if a similar event is recorded 10 times in a +// in a 10 minute rolling interval. A similar event is an event that varies only by +// the Event.Message field. Rather than recording the precise event, aggregation +// will create a new event whose message reports that it has combined events with +// the same reason. +// * Events are incrementally counted if the exact same event is encountered multiple +// times. +// * A source may burst 25 events about an object, but has a refill rate budget +// per object of 1 event every 5 minutes to control long-tail of spam. +func NewEventCorrelator(clock clock.Clock) *EventCorrelator { + cacheSize := maxLruCacheEntries + spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + cacheSize, + EventAggregatorByReasonFunc, + EventAggregatorByReasonMessageFunc, + defaultAggregateMaxEvents, + defaultAggregateIntervalInSeconds, + clock), + + logger: newEventLogger(cacheSize, clock), + } +} + +func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator { + optionsWithDefaults := populateDefaults(options) + spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.KeyFunc, + optionsWithDefaults.MessageFunc, + optionsWithDefaults.MaxEvents, + optionsWithDefaults.MaxIntervalInSeconds, + optionsWithDefaults.Clock), + logger: newEventLogger(optionsWithDefaults.LRUCacheSize, optionsWithDefaults.Clock), + } +} + +// populateDefaults populates the zero value options with defaults +func populateDefaults(options CorrelatorOptions) CorrelatorOptions { + if options.LRUCacheSize == 0 { + options.LRUCacheSize = maxLruCacheEntries + } + if options.BurstSize == 0 { + options.BurstSize = defaultSpamBurst + } + if options.QPS == 0 { + options.QPS = defaultSpamQPS + } + if options.KeyFunc == nil { + options.KeyFunc = EventAggregatorByReasonFunc + } + if options.MessageFunc == nil { + options.MessageFunc = EventAggregatorByReasonMessageFunc + } + if options.MaxEvents == 0 { + options.MaxEvents = defaultAggregateMaxEvents + } + if options.MaxIntervalInSeconds == 0 { + options.MaxIntervalInSeconds = defaultAggregateIntervalInSeconds + } + if options.Clock == nil { + options.Clock = clock.RealClock{} + } + return options +} + +// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events 
+func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { + if newEvent == nil { + return nil, fmt.Errorf("event is nil") + } + aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent) + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey) + if c.filterFunc(observedEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *v1.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 000000000..0b3f344a9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,66 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string + + IncludeObject bool +} + +func objectString(object runtime.Object, includeObject bool) string { + if !includeObject { + return "" + } + return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}", + object.GetObjectKind().GroupVersionKind().Kind, + object.GetObjectKind().GroupVersionKind().GroupVersion(), + ) +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) + } +} + +func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(object, eventtype, reason, messageFmt, args...) +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go new file mode 100644 index 000000000..d1818a8d9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/util/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "net/http" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" +) + +// ValidateEventType checks that eventtype is an expected type of event +func ValidateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +// IsKeyNotFoundError is utility function that checks if an error is not found error +func IsKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 3ec4e1935..5fe768ed5 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -44,15 +44,12 @@ type tlsCacheKey struct { insecure bool caData string certData string - keyData string + keyData string `datapolicy:"security-key"` certFile string keyFile string - getCert string serverName string nextProtos string - dial string disableCompression bool - proxy string } func (t tlsCacheKey) String() string { @@ -60,22 +57,24 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t, proxy: %s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression, t.proxy) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { - key, err := tlsConfigKey(config) + key, canCache, err := tlsConfigKey(config) if err != nil { return nil, err } - // Ensure we only create a single transport for the given TLS options - c.mu.Lock() - defer c.mu.Unlock() + if canCache { + // Ensure we only create a single transport for the given TLS options + c.mu.Lock() + defer c.mu.Unlock() - // See if we already have a custom transport for this config - if t, ok := c.transports[key]; ok { - return t, nil + // See if we already have a custom transport for this config + if t, ok := c.transports[key]; ok { + return t, nil + } } // Get the TLS options for this client config @@ -110,8 +109,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { proxy = config.Proxy } - // Cache a single transport for these options - c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{ + transport := utilnet.SetTransportDefaults(&http.Transport{ Proxy: proxy, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, @@ -119,24 +117,33 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { DialContext: dial, DisableCompression: config.DisableCompression, }) - return c.transports[key], nil + + if canCache { + // Cache a single transport for these options + c.transports[key] = transport + } + + return transport, nil 
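	// Note on the canCache flag used above: tlsConfigKey (below) reports a config as
	// non-cacheable whenever it carries function-valued fields (TLS.GetCert, Dial or
	// Proxy), because functions cannot be meaningfully compared for equality. Such
	// configs previously had their pointers formatted into the cache key; with this
	// change they bypass c.transports entirely and get a freshly built transport on
	// every call. Illustrative sketch, assuming cache is a *tlsTransportCache and
	// customDial is a hypothetical dial function:
	//
	//   cached, _ := cache.get(&Config{TLS: TLSConfig{Insecure: true}}) // cacheable, shared
	//   fresh, _ := cache.get(&Config{Dial: customDial})                // rebuilt on each call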
} // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor -func tlsConfigKey(c *Config) (tlsCacheKey, error) { +func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) { // Make sure ca/key/cert content is loaded if err := loadTLSFiles(c); err != nil { - return tlsCacheKey{}, err + return tlsCacheKey{}, false, err } + + if c.TLS.GetCert != nil || c.Dial != nil || c.Proxy != nil { + // cannot determine equality for functions + return tlsCacheKey{}, false, nil + } + k := tlsCacheKey{ insecure: c.TLS.Insecure, caData: string(c.TLS.CAData), - getCert: fmt.Sprintf("%p", c.TLS.GetCert), serverName: c.TLS.ServerName, nextProtos: strings.Join(c.TLS.NextProtos, ","), - dial: fmt.Sprintf("%p", c.Dial), disableCompression: c.DisableCompression, - proxy: fmt.Sprintf("%p", c.Proxy), } if c.TLS.ReloadTLSFiles { @@ -147,5 +154,5 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { k.keyData = string(c.TLS.KeyData) } - return k, nil + return k, true, nil } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 45db24864..070474831 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -35,10 +35,10 @@ type Config struct { // Username and password for basic authentication Username string - Password string + Password string `datapolicy:"password"` // Bearer token for authentication - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. @@ -70,7 +70,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -108,7 +108,7 @@ func (c *Config) HasCertAuth() bool { return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) } -// HasCertCallbacks returns whether the configuration has certificate callback or not. +// HasCertCallback returns whether the configuration has certificate callback or not. func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index a05208d92..056bc023c 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -146,6 +146,7 @@ type userAgentRoundTripper struct { rt http.RoundTripper } +// NewUserAgentRoundTripper will add User-Agent header to a request unless it has already been set. 
func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { return &userAgentRoundTripper{agent, rt} } @@ -167,7 +168,7 @@ func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { retur type basicAuthRoundTripper struct { username string - password string + password string `datapolicy:"password"` rt http.RoundTripper } @@ -260,7 +261,7 @@ func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTr return &bearerAuthRoundTripper{bearer, nil, rt} } -// NewBearerAuthRoundTripper adds the provided bearer token to a request +// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. // If tokenFile is non-empty, it is periodically read, // and the last successfully read content is used as the bearer token. @@ -305,7 +306,7 @@ func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { retu // requestInfo keeps track of information about a request/response combination type requestInfo struct { - RequestHeaders http.Header + RequestHeaders http.Header `datapolicy:"token"` RequestVerb string RequestURL string @@ -340,6 +341,7 @@ func (r *requestInfo) toCurl() string { headers := "" for key, values := range r.RequestHeaders { for _, value := range values { + value = maskValue(key, value) headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) } } diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go new file mode 100644 index 000000000..a52faf919 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go @@ -0,0 +1,162 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "crypto/tls" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ciphers maps strings into tls package cipher constants in +// https://golang.org/pkg/crypto/tls/#pkg-constants +// to be replaced by tls.CipherSuites() when the project migrates to go1.14. 
+var ciphers = map[string]uint16{ + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, +} + +// to be replaced by tls.InsecureCipherSuites() when the project migrates to go1.14. +var insecureCiphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, +} + +// InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have +// security issues. +func InsecureTLSCiphers() map[string]uint16 { + cipherKeys := make(map[string]uint16, len(insecureCiphers)) + for k, v := range insecureCiphers { + cipherKeys[k] = v + } + return cipherKeys +} + +// InsecureTLSCipherNames returns a list of cipher suite names implemented by crypto/tls +// which have security issues. +func InsecureTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range insecureCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// PreferredTLSCipherNames returns a list of cipher suite names implemented by crypto/tls. +func PreferredTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range ciphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +func allCiphers() map[string]uint16 { + acceptedCiphers := make(map[string]uint16, len(ciphers)+len(insecureCiphers)) + for k, v := range ciphers { + acceptedCiphers[k] = v + } + for k, v := range insecureCiphers { + acceptedCiphers[k] = v + } + return acceptedCiphers +} + +// TLSCipherPossibleValues returns all acceptable cipher suite names. +// This is a combination of both InsecureTLSCipherNames() and PreferredTLSCipherNames(). 
+func TLSCipherPossibleValues() []string { + cipherKeys := sets.NewString() + acceptedCiphers := allCiphers() + for key := range acceptedCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed. +func TLSCipherSuites(cipherNames []string) ([]uint16, error) { + if len(cipherNames) == 0 { + return nil, nil + } + ciphersIntSlice := make([]uint16, 0) + possibleCiphers := allCiphers() + for _, cipher := range cipherNames { + intValue, ok := possibleCiphers[cipher] + if !ok { + return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher) + } + ciphersIntSlice = append(ciphersIntSlice, intValue) + } + return ciphersIntSlice, nil +} + +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSPossibleVersions returns all acceptable values for TLS Version. +func TLSPossibleVersions() []string { + versionsKeys := sets.NewString() + for key := range versions { + versionsKeys.Insert(key) + } + return versionsKeys.List() +} + +// TLSVersion returns the TLS Version ID for the version name passed. +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} + +// DefaultTLSVersion defines the default TLS Version. +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go new file mode 100644 index 000000000..45b39eb0f --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go @@ -0,0 +1,29 @@ +// +build go1.14 + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "crypto/tls" +) + +func init() { + // support official IANA names as well on go1.14 + ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 +} diff --git a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go new file mode 100644 index 000000000..bd2cf5f87 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
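// A hedged sketch of how a caller can combine the ciphersuites_flag.go helpers
// above into a *tls.Config; the chosen suite names are only examples.
package main

import (
	"crypto/tls"
	"log"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	suites, err := cliflag.TLSCipherSuites([]string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
	})
	if err != nil {
		log.Fatal(err)
	}
	minVersion, err := cliflag.TLSVersion("") // empty name selects DefaultTLSVersion(), i.e. TLS 1.2
	if err != nil {
		log.Fatal(err)
	}
	_ = &tls.Config{CipherSuites: suites, MinVersion: minVersion}
}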
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding +// that separates keys from values with ':' and separates key-value pairs with ','. +// A key can be repeated multiple times, in which case the values are appended to a +// slice of strings associated with that key. Items in the list associated with a given +// key will appear in the order provided. +// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map. +// This makes it possible to override default values with a command-line option rather than appending to defaults, +// while still allowing the distribution of key-value pairs across multiple flag invocations. +// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +type ColonSeparatedMultimapStringString struct { + Multimap *map[string][]string + initialized bool // set to true after the first Set call +} + +// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. 
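// A hedged usage sketch for the multimap flag documented above (the flag name
// "route-labels" is illustrative); it uses the constructor defined just below.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	target := map[string][]string{}
	fs := pflag.NewFlagSet("sketch", pflag.ContinueOnError)
	fs.Var(cliflag.NewColonSeparatedMultimapStringString(&target), "route-labels", "repeatable key:value pairs")
	_ = fs.Parse([]string{"--route-labels=a:hello,b:again", "--route-labels=b:beautiful"})
	fmt.Println(target) // map[a:[hello] b:[again beautiful]]
}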
+func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m} +} + +// Set implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Set(value string) error { + if m.Multimap == nil { + return fmt.Errorf("no target (nil pointer to map[string][]string)") + } + if !m.initialized || *m.Multimap == nil { + // clear default values, or allocate if no existing map + *m.Multimap = make(map[string][]string) + m.initialized = true + } + for _, pair := range strings.Split(value, ",") { + if len(pair) == 0 { + continue + } + kv := strings.SplitN(pair, ":", 2) + if len(kv) != 2 { + return fmt.Errorf("malformed pair, expect string:string") + } + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + (*m.Multimap)[k] = append((*m.Multimap)[k], v) + } + return nil +} + +// String implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) String() string { + type kv struct { + k string + v string + } + kvs := make([]kv, 0, len(*m.Multimap)) + for k, vs := range *m.Multimap { + for i := range vs { + kvs = append(kvs, kv{k: k, v: vs[i]}) + } + } + // stable sort by keys, order of values should be preserved + sort.SliceStable(kvs, func(i, j int) bool { + return kvs[i].k < kvs[j].k + }) + pairs := make([]string, 0, len(kvs)) + for i := range kvs { + pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v)) + } + return strings.Join(pairs, ",") +} + +// Type implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Type() string { + return "colonSeparatedMultimapStringString" +} + +// Empty implements OmitEmpty +func (m *ColonSeparatedMultimapStringString) Empty() bool { + return len(*m.Multimap) == 0 +} diff --git a/vendor/k8s.io/component-base/cli/flag/configuration_map.go b/vendor/k8s.io/component-base/cli/flag/configuration_map.go new file mode 100644 index 000000000..911b05ec6 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/configuration_map.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +type ConfigurationMap map[string]string + +func (m *ConfigurationMap) String() string { + pairs := []string{} + for k, v := range *m { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (m *ConfigurationMap) Set(value string) error { + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) + } else { + (*m)[strings.TrimSpace(arr[0])] = "" + } + } + return nil +} + +func (*ConfigurationMap) Type() string { + return "mapStringString" +} diff --git a/vendor/k8s.io/component-base/cli/flag/flags.go b/vendor/k8s.io/component-base/cli/flag/flags.go new file mode 100644 index 000000000..70146f335 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/flags.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "strings" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" +) + +// WordSepNormalizeFunc changes all flags that contain "_" separators +func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) + } + return pflag.NormalizedName(name) +} + +// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators +func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + nname := strings.Replace(name, "_", "-", -1) + klog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + + return pflag.NormalizedName(nname) + } + return pflag.NormalizedName(name) +} + +// InitFlags normalizes, parses, then logs the command line flags +func InitFlags() { + pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + pflag.Parse() + pflag.VisitAll(func(flag *pflag.Flag) { + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) +} + +// PrintFlags logs the flags in the flagset +func PrintFlags(flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) +} diff --git a/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go b/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go new file mode 100644 index 000000000..bf8dbfb9b --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
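// A hedged sketch of the flag-name normalization shown in flags.go above: with
// WordSepNormalizeFunc installed, "_" and "-" spellings resolve to the same flag
// (the "log_dir" flag here is only an example).
package main

import (
	goflag "flag"
	"fmt"

	"github.com/spf13/pflag"
	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	goflag.CommandLine.String("log_dir", "", "directory for log files")
	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	_ = pflag.CommandLine.Parse([]string{"--log-dir=/tmp"})
	fmt.Println(pflag.Lookup("log-dir").Value.String()) // /tmp
}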
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string 0 { + s = s + ":" + strings.Join(nkc.Names, ",") + } + return s +} + +func (nkc *NamedCertKey) Set(value string) error { + cs := strings.SplitN(value, ":", 2) + var keycert string + if len(cs) == 2 { + var names string + keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1]) + if names == "" { + return errors.New("empty names list is not allowed") + } + nkc.Names = nil + for _, name := range strings.Split(names, ",") { + nkc.Names = append(nkc.Names, strings.TrimSpace(name)) + } + } else { + nkc.Names = nil + keycert = strings.TrimSpace(cs[0]) + } + cs = strings.Split(keycert, ",") + if len(cs) != 2 { + return errors.New("expected comma separated certificate and key file paths") + } + nkc.CertFile = strings.TrimSpace(cs[0]) + nkc.KeyFile = strings.TrimSpace(cs[1]) + return nil +} + +func (*NamedCertKey) Type() string { + return "namedCertKey" +} + +// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own +// flag instance (in contrast to comma separated slices). +type NamedCertKeyArray struct { + value *[]NamedCertKey + changed bool +} + +var _ flag.Value = &NamedCertKeyArray{} + +// NewNamedKeyCertArray creates a new NamedCertKeyArray with the internal value +// pointing to p. +func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray { + return &NamedCertKeyArray{ + value: p, + } +} + +func (a *NamedCertKeyArray) Set(val string) error { + nkc := NamedCertKey{} + err := nkc.Set(val) + if err != nil { + return err + } + if !a.changed { + *a.value = []NamedCertKey{nkc} + a.changed = true + } else { + *a.value = append(*a.value, nkc) + } + return nil +} + +func (a *NamedCertKeyArray) Type() string { + return "namedCertKey" +} + +func (a *NamedCertKeyArray) String() string { + nkcs := make([]string, 0, len(*a.value)) + for i := range *a.value { + nkcs = append(nkcs, (*a.value)[i].String()) + } + return "[" + strings.Join(nkcs, ";") + "]" +} diff --git a/vendor/k8s.io/component-base/cli/flag/noop.go b/vendor/k8s.io/component-base/cli/flag/noop.go new file mode 100644 index 000000000..03f7f14c0 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/noop.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
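// A hedged sketch of the NamedCertKey and NamedCertKeyArray flag values above.
// As implemented by Set, a value is "certfile,keyfile" with an optional
// ":name,name,..." list of server names; the paths and names here are examples.
package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var pairs []cliflag.NamedCertKey
	arr := cliflag.NewNamedCertKeyArray(&pairs)
	if err := arr.Set("tls.crt,tls.key:www.example.com,*.example.com"); err != nil {
		panic(err)
	}
	fmt.Println(pairs[0].CertFile, pairs[0].KeyFile, pairs[0].Names)
	// tls.crt tls.key [www.example.com *.example.com]
}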
+*/ + +package flag + +import ( + goflag "flag" + "github.com/spf13/pflag" +) + +// NoOp implements goflag.Value and plfag.Value, +// but has a noop Set implementation +type NoOp struct{} + +var _ goflag.Value = NoOp{} +var _ pflag.Value = NoOp{} + +func (NoOp) String() string { + return "" +} + +func (NoOp) Set(val string) error { + return nil +} + +func (NoOp) Type() string { + return "NoOp" +} diff --git a/vendor/k8s.io/component-base/cli/flag/omitempty.go b/vendor/k8s.io/component-base/cli/flag/omitempty.go new file mode 100644 index 000000000..c354754ea --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/omitempty.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +// OmitEmpty is an interface for flags to report whether their underlying value +// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(), +// it is assumed that flag may be omitted from the command line. +type OmitEmpty interface { + Empty() bool +} diff --git a/vendor/k8s.io/component-base/cli/flag/sectioned.go b/vendor/k8s.io/component-base/cli/flag/sectioned.go new file mode 100644 index 000000000..493a6c0f0 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/sectioned.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/spf13/pflag" +) + +// NamedFlagSets stores named flag sets in the order of calling FlagSet. +type NamedFlagSets struct { + // Order is an ordered list of flag set names. + Order []string + // FlagSets stores the flag sets by name. + FlagSets map[string]*pflag.FlagSet +} + +// FlagSet returns the flag set with the given name and adds it to the +// ordered name list if it is not in there yet. +func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet { + if nfs.FlagSets == nil { + nfs.FlagSets = map[string]*pflag.FlagSet{} + } + if _, ok := nfs.FlagSets[name]; !ok { + nfs.FlagSets[name] = pflag.NewFlagSet(name, pflag.ExitOnError) + nfs.Order = append(nfs.Order, name) + } + return nfs.FlagSets[name] +} + +// PrintSections prints the given names flag sets in sections, with the maximal given column number. +// If cols is zero, lines are not wrapped. 
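// A hedged sketch of grouping flags into the named sections described above and
// printing them (section and flag names are illustrative); PrintSections itself
// follows just below.
package main

import (
	"os"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var nfs cliflag.NamedFlagSets
	nfs.FlagSet("generic").String("config", "", "path to a config file")
	nfs.FlagSet("logging").Int("v", 0, "log verbosity")
	cliflag.PrintSections(os.Stdout, nfs, 0) // cols == 0: no wrapping
}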
+func PrintSections(w io.Writer, fss NamedFlagSets, cols int) { + for _, name := range fss.Order { + fs := fss.FlagSets[name] + if !fs.HasFlags() { + continue + } + + wideFS := pflag.NewFlagSet("", pflag.ExitOnError) + wideFS.AddFlagSet(fs) + + var zzz string + if cols > 24 { + zzz = strings.Repeat("z", cols-24) + wideFS.Int(zzz, 0, strings.Repeat("z", cols-24)) + } + + var buf bytes.Buffer + fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols)) + + if cols > 24 { + i := strings.Index(buf.String(), zzz) + lines := strings.Split(buf.String()[:i], "\n") + fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n")) + fmt.Fprintln(w) + } else { + fmt.Fprint(w, buf.String()) + } + } +} diff --git a/vendor/k8s.io/component-base/cli/flag/string_flag.go b/vendor/k8s.io/component-base/cli/flag/string_flag.go new file mode 100644 index 000000000..331bdb66e --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/string_flag.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. +type StringFlag struct { + // If Set has been invoked this value is true + provided bool + // The exact value provided on the flag + value string +} + +func NewStringFlag(defaultVal string) StringFlag { + return StringFlag{value: defaultVal} +} + +func (f *StringFlag) Default(value string) { + f.value = value +} + +func (f StringFlag) String() string { + return f.value +} + +func (f StringFlag) Value() string { + return f.value +} + +func (f *StringFlag) Set(value string) error { + f.value = value + f.provided = true + + return nil +} + +func (f StringFlag) Provided() bool { + return f.provided +} + +func (f *StringFlag) Type() string { + return "string" +} diff --git a/vendor/k8s.io/component-base/cli/flag/tristate.go b/vendor/k8s.io/component-base/cli/flag/tristate.go new file mode 100644 index 000000000..cf16376bf --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/tristate.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "strconv" +) + +// Tristate is a flag compatible with flags and pflags that +// keeps track of whether it had a value supplied or not. 
+type Tristate int + +const ( + Unset Tristate = iota // 0 + True + False +) + +func (f *Tristate) Default(value bool) { + *f = triFromBool(value) +} + +func (f Tristate) String() string { + b := boolFromTri(f) + return fmt.Sprintf("%t", b) +} + +func (f Tristate) Value() bool { + b := boolFromTri(f) + return b +} + +func (f *Tristate) Set(value string) error { + boolVal, err := strconv.ParseBool(value) + if err != nil { + return err + } + + *f = triFromBool(boolVal) + return nil +} + +func (f Tristate) Provided() bool { + if f != Unset { + return true + } + return false +} + +func (f *Tristate) Type() string { + return "tristate" +} + +func boolFromTri(t Tristate) bool { + if t == True { + return true + } else { + return false + } +} + +func triFromBool(b bool) Tristate { + if b { + return True + } else { + return False + } +} diff --git a/vendor/k8s.io/component-base/logs/OWNERS b/vendor/k8s.io/component-base/logs/OWNERS new file mode 100644 index 000000000..58b1a4d7b --- /dev/null +++ b/vendor/k8s.io/component-base/logs/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-instrumentation-approvers +reviewers: +- sig-instrumentation-reviewers +labels: +- sig/instrumentation diff --git a/vendor/k8s.io/component-base/logs/datapol/datapol.go b/vendor/k8s.io/component-base/logs/datapol/datapol.go new file mode 100644 index 000000000..6ec8fb12d --- /dev/null +++ b/vendor/k8s.io/component-base/logs/datapol/datapol.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package datapol contains functions to determine if objects contain sensitive +// data to e.g. make decisions on whether to log them or not. +package datapol + +import ( + "reflect" + "strings" + + "k8s.io/klog/v2" +) + +// Verify returns a list of the datatypes contained in the argument that can be +// considered sensitive w.r.t. 
to logging +func Verify(value interface{}) []string { + defer func() { + if r := recover(); r != nil { + //TODO maybe export a metric + klog.Warningf("Error while inspecting arguments for sensitive data: %v", r) + } + }() + t := reflect.ValueOf(value) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return datatypes(t) +} + +func datatypes(v reflect.Value) []string { + if types := byType(v.Type()); len(types) > 0 { + // Slices, and maps can be nil or empty, only the nil case is zero + switch v.Kind() { + case reflect.Slice, reflect.Map: + if !v.IsZero() && v.Len() > 0 { + return types + } + default: + if !v.IsZero() { + return types + } + } + } + switch v.Kind() { + case reflect.Interface: + return datatypes(v.Elem()) + case reflect.Slice, reflect.Array: + for i := 0; i < v.Len(); i++ { + if types := datatypes(v.Index(i)); len(types) > 0 { + return types + } + } + case reflect.Map: + mapIter := v.MapRange() + for mapIter.Next() { + k := mapIter.Key() + v := mapIter.Value() + if types := datatypes(k); len(types) > 0 { + return types + } + if types := datatypes(v); len(types) > 0 { + return types + } + } + case reflect.Struct: + t := v.Type() + numField := t.NumField() + + for i := 0; i < numField; i++ { + f := t.Field(i) + if f.Type.Kind() == reflect.Ptr { + continue + } + if reason, ok := f.Tag.Lookup("datapolicy"); ok { + if !v.Field(i).IsZero() { + return strings.Split(reason, ",") + } + } + if types := datatypes(v.Field(i)); len(types) > 0 { + return types + } + } + } + return nil +} diff --git a/vendor/k8s.io/component-base/logs/datapol/externaltypes.go b/vendor/k8s.io/component-base/logs/datapol/externaltypes.go new file mode 100644 index 000000000..d343d3e65 --- /dev/null +++ b/vendor/k8s.io/component-base/logs/datapol/externaltypes.go @@ -0,0 +1,49 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datapol + +import ( + "fmt" + "reflect" +) + +const ( + httpHeader = "net/http.Header" + httpCookie = "net/http.Cookie" + x509Certificate = "crypto/x509.Certificate" +) + +// GlobalDatapolicyMapping returns the list of sensitive datatypes are embedded +// in types not native to Kubernetes. +func GlobalDatapolicyMapping(v interface{}) []string { + return byType(reflect.TypeOf(v)) +} + +func byType(t reflect.Type) []string { + // Use string representation of the type to prevent taking a depency on the actual type. + switch fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) { + case httpHeader: + return []string{"password", "token"} + case httpCookie: + return []string{"token"} + case x509Certificate: + return []string{"security-key"} + default: + return nil + } + +} diff --git a/vendor/k8s.io/component-base/logs/json/json.go b/vendor/k8s.io/component-base/logs/json/json.go new file mode 100644 index 000000000..f9bb55656 --- /dev/null +++ b/vendor/k8s.io/component-base/logs/json/json.go @@ -0,0 +1,177 @@ +/* +Copyright 2020 The Kubernetes Authors. 
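// A hedged sketch of the datapol inspection above: fields carrying a
// `datapolicy` struct tag are reported once they hold a non-zero value
// (the Credentials type is illustrative).
package main

import (
	"fmt"

	"k8s.io/component-base/logs/datapol"
)

type Credentials struct {
	User     string
	Password string `datapolicy:"password"`
}

func main() {
	fmt.Println(datapol.Verify(Credentials{User: "bob"}))                      // []
	fmt.Println(datapol.Verify(Credentials{User: "bob", Password: "hunter2"})) // [password]
}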
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "os" + "time" + + "github.com/go-logr/logr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// Inspired from https://github.com/go-logr/zapr, some functions is copy from the repo. + +var ( + // JSONLogger is global json log format logr + JSONLogger logr.Logger + + // timeNow stubbed out for testing + timeNow = time.Now +) + +// zapLogger is a logr.Logger that uses Zap to record log. +type zapLogger struct { + // NB: this looks very similar to zap.SugaredLogger, but + // deals with our desire to have multiple verbosity levels. + l *zap.Logger + lvl int +} + +// implement logr.Logger +var _ logr.Logger = &zapLogger{} + +// Enabled should always return true +func (l *zapLogger) Enabled() bool { + return true +} + +// Info write message to error level log +func (l *zapLogger) Info(msg string, keysAndVals ...interface{}) { + entry := zapcore.Entry{ + Time: timeNow(), + Message: msg, + } + checkedEntry := l.l.Core().Check(entry, nil) + checkedEntry.Write(l.handleFields(keysAndVals)...) +} + +// dPanic write message to DPanicLevel level log +// we need implement this because unit test case need stub time.Now +// otherwise the ts field always changed +func (l *zapLogger) dPanic(msg string) { + entry := zapcore.Entry{ + Level: zapcore.DPanicLevel, + Time: timeNow(), + Message: msg, + } + checkedEntry := l.l.Core().Check(entry, nil) + checkedEntry.Write(zap.Int("v", l.lvl)) +} + +// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes +// additional pre-converted Zap fields, for use with automatically attached fields, like +// `error`. +func (l *zapLogger) handleFields(args []interface{}, additional ...zap.Field) []zap.Field { + // a slightly modified version of zap.SugaredLogger.sweetenFields + if len(args) == 0 { + // fast-return if we have no suggared fields. + return append(additional, zap.Int("v", l.lvl)) + } + + // unlike Zap, we can be pretty sure users aren't passing structured + // fields (since logr has no concept of that), so guess that we need a + // little less space. + fields := make([]zap.Field, 0, len(args)/2+len(additional)+1) + fields = append(fields, zap.Int("v", l.lvl)) + for i := 0; i < len(args)-1; i += 2 { + // check just in case for strongly-typed Zap fields, which is illegal (since + // it breaks implementation agnosticism), so we can give a better error message. + if _, ok := args[i].(zap.Field); ok { + l.dPanic("strongly-typed Zap Field passed to logr") + break + } + + // process a key-value pair, + // ensuring that the key is a string + key, val := args[i], args[i+1] + keyStr, isString := key.(string) + if !isString { + // if the key isn't a string, stop logging + l.dPanic("non-string key argument passed to logging, ignoring all later arguments") + break + } + + fields = append(fields, zap.Any(keyStr, val)) + } + + return append(fields, additional...) 
+} + +// Error write log message to error level +func (l *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) { + entry := zapcore.Entry{ + Level: zapcore.ErrorLevel, + Time: timeNow(), + Message: msg, + } + checkedEntry := l.l.Core().Check(entry, nil) + checkedEntry.Write(l.handleFields(keysAndVals, handleError(err))...) +} + +// V return info logr.Logger with specified level +func (l *zapLogger) V(level int) logr.Logger { + return &zapLogger{ + lvl: l.lvl + level, + l: l.l, + } +} + +// WithValues return logr.Logger with some keys And Values +func (l *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger { + l.l = l.l.With(l.handleFields(keysAndValues)...) + return l +} + +// WithName return logger Named with specified name +func (l *zapLogger) WithName(name string) logr.Logger { + l.l = l.l.Named(name) + return l +} + +// encoderConfig config zap encodetime format +var encoderConfig = zapcore.EncoderConfig{ + MessageKey: "msg", + + TimeKey: "ts", + EncodeTime: zapcore.EpochMillisTimeEncoder, +} + +// NewJSONLogger creates a new json logr.Logger using the given Zap Logger to log. +func NewJSONLogger(w zapcore.WriteSyncer) logr.Logger { + l, _ := zap.NewProduction() + if w == nil { + w = os.Stdout + } + log := l.WithOptions(zap.AddCallerSkip(1), + zap.WrapCore( + func(zapcore.Core) zapcore.Core { + return zapcore.NewCore(zapcore.NewJSONEncoder(encoderConfig), zapcore.AddSync(w), zapcore.DebugLevel) + })) + return &zapLogger{ + l: log, + } +} + +func handleError(err error) zap.Field { + return zap.NamedError("err", err) +} + +func init() { + JSONLogger = NewJSONLogger(nil) +} diff --git a/vendor/k8s.io/component-base/logs/logs.go b/vendor/k8s.io/component-base/logs/logs.go new file mode 100644 index 000000000..073e0312f --- /dev/null +++ b/vendor/k8s.io/component-base/logs/logs.go @@ -0,0 +1,78 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "flag" + "fmt" + "log" + "time" + + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +const logFlushFreqFlagName = "log-flush-frequency" + +var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum number of seconds between log flushes") + +func init() { + klog.InitFlags(flag.CommandLine) +} + +// AddFlags registers this package's flags on arbitrary FlagSets, such that they point to the +// same value as the global flags. +func AddFlags(fs *pflag.FlagSet) { + fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) +} + +// KlogWriter serves as a bridge between the standard log package and the glog package. +type KlogWriter struct{} + +// Write implements the io.Writer interface. +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) + return len(data), nil +} + +// InitLogs initializes logs the way we want for kubernetes. +func InitLogs() { + log.SetOutput(KlogWriter{}) + log.SetFlags(0) + // The default glog flush interval is 5 seconds. 
+ go wait.Forever(klog.Flush, *logFlushFreq) +} + +// FlushLogs flushes logs immediately. +func FlushLogs() { + klog.Flush() +} + +// NewLogger creates a new log.Logger which sends logs to klog.Info. +func NewLogger(prefix string) *log.Logger { + return log.New(KlogWriter{}, prefix, 0) +} + +// GlogSetter is a setter to set glog level. +func GlogSetter(val string) (string, error) { + var level klog.Level + if err := level.Set(val); err != nil { + return "", fmt.Errorf("failed set klog.logging.verbosity %s: %v", val, err) + } + return fmt.Sprintf("successfully set klog.logging.verbosity to %s", val), nil +} diff --git a/vendor/k8s.io/component-base/logs/options.go b/vendor/k8s.io/component-base/logs/options.go new file mode 100644 index 000000000..3d9fd4c08 --- /dev/null +++ b/vendor/k8s.io/component-base/logs/options.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "flag" + "fmt" + "strings" + + "github.com/go-logr/logr" + "github.com/spf13/pflag" + + "k8s.io/component-base/logs/sanitization" + "k8s.io/klog/v2" +) + +const ( + logFormatFlagName = "logging-format" + defaultLogFormat = "text" +) + +// List of logs (k8s.io/klog + k8s.io/component-base/logs) flags supported by all logging formats +var supportedLogsFlags = map[string]struct{}{ + "v": {}, + // TODO: support vmodule after 1.19 Alpha +} + +// Options has klog format parameters +type Options struct { + LogFormat string + LogSanitization bool +} + +// NewOptions return new klog options +func NewOptions() *Options { + return &Options{ + LogFormat: defaultLogFormat, + } +} + +// Validate verifies if any unsupported flag is set +// for non-default logging format +func (o *Options) Validate() []error { + errs := []error{} + if o.LogFormat != defaultLogFormat { + allFlags := unsupportedLoggingFlags() + for _, fname := range allFlags { + if flagIsSet(fname) { + errs = append(errs, fmt.Errorf("non-default logging format doesn't honor flag: %s", fname)) + } + } + } + if _, err := o.Get(); err != nil { + errs = append(errs, fmt.Errorf("unsupported log format: %s", o.LogFormat)) + } + return errs +} + +func flagIsSet(name string) bool { + f := flag.Lookup(name) + if f != nil { + return f.DefValue != f.Value.String() + } + pf := pflag.Lookup(name) + if pf != nil { + return pf.DefValue != pf.Value.String() + } + panic("failed to lookup unsupported log flag") +} + +// AddFlags add logging-format flag +func (o *Options) AddFlags(fs *pflag.FlagSet) { + unsupportedFlags := fmt.Sprintf("--%s", strings.Join(unsupportedLoggingFlags(), ", --")) + formats := fmt.Sprintf(`"%s"`, strings.Join(logRegistry.List(), `", "`)) + fs.StringVar(&o.LogFormat, logFormatFlagName, defaultLogFormat, fmt.Sprintf("Sets the log format. 
Permitted formats: %s.\nNon-default formats don't honor these flags: %s.\nNon-default choices are currently alpha and subject to change without warning.", formats, unsupportedFlags)) + + // No new log formats should be added after generation is of flag options + logRegistry.Freeze() + fs.BoolVar(&o.LogSanitization, "experimental-logging-sanitization", o.LogSanitization, `[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) +} + +// Apply set klog logger from LogFormat type +func (o *Options) Apply() { + // if log format not exists, use nil loggr + loggr, _ := o.Get() + klog.SetLogger(loggr) + if o.LogSanitization { + klog.SetLogFilter(&sanitization.SanitizingFilter{}) + } +} + +// Get logger with LogFormat field +func (o *Options) Get() (logr.Logger, error) { + return logRegistry.Get(o.LogFormat) +} + +func unsupportedLoggingFlags() []string { + allFlags := []string{} + + // k8s.io/klog flags + fs := &flag.FlagSet{} + klog.InitFlags(fs) + fs.VisitAll(func(flag *flag.Flag) { + if _, found := supportedLogsFlags[flag.Name]; !found { + allFlags = append(allFlags, flag.Name) + } + }) + + // k8s.io/component-base/logs flags + pfs := &pflag.FlagSet{} + AddFlags(pfs) + pfs.VisitAll(func(flag *pflag.Flag) { + if _, found := supportedLogsFlags[flag.Name]; !found { + allFlags = append(allFlags, flag.Name) + } + }) + return allFlags +} diff --git a/vendor/k8s.io/component-base/logs/registry.go b/vendor/k8s.io/component-base/logs/registry.go new file mode 100644 index 000000000..c71899db6 --- /dev/null +++ b/vendor/k8s.io/component-base/logs/registry.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
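// A hedged sketch of wiring the logging Options above into a component's
// start-up; the flag-set name is illustrative and error handling is minimal.
package main

import (
	"os"

	"github.com/spf13/pflag"
	"k8s.io/component-base/logs"
)

func main() {
	opts := logs.NewOptions()
	fs := pflag.NewFlagSet("sketch", pflag.ExitOnError)
	opts.AddFlags(fs)
	_ = fs.Parse(os.Args[1:])
	if errs := opts.Validate(); len(errs) > 0 {
		panic(errs[0])
	}
	opts.Apply() // installs the selected logr.Logger (and optional sanitizing filter) into klog
	logs.InitLogs()
	defer logs.FlushLogs()
}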
+*/ + +package logs + +import ( + "fmt" + "sort" + + "github.com/go-logr/logr" + json "k8s.io/component-base/logs/json" +) + +const ( + jsonLogFormat = "json" +) + +var logRegistry = NewLogFormatRegistry() + +// LogFormatRegistry store klog format registry +type LogFormatRegistry struct { + registry map[string]logr.Logger + frozen bool +} + +// NewLogFormatRegistry return new init LogFormatRegistry struct +func NewLogFormatRegistry() *LogFormatRegistry { + return &LogFormatRegistry{ + registry: make(map[string]logr.Logger), + frozen: false, + } +} + +// Register new log format registry to global logRegistry +func (lfr *LogFormatRegistry) Register(name string, logger logr.Logger) error { + if lfr.frozen { + return fmt.Errorf("log format is frozen, unable to register log format") + } + if _, ok := lfr.registry[name]; ok { + return fmt.Errorf("log format: %s already exists", name) + } + lfr.registry[name] = logger + return nil +} + +// Get specified log format logger +func (lfr *LogFormatRegistry) Get(name string) (logr.Logger, error) { + re, ok := lfr.registry[name] + if !ok { + return nil, fmt.Errorf("log format: %s does not exists", name) + } + return re, nil +} + +// Set specified log format logger +func (lfr *LogFormatRegistry) Set(name string, logger logr.Logger) error { + if lfr.frozen { + return fmt.Errorf("log format is frozen, unable to set log format") + } + + lfr.registry[name] = logger + return nil +} + +// Delete specified log format logger +func (lfr *LogFormatRegistry) Delete(name string) error { + if lfr.frozen { + return fmt.Errorf("log format is frozen, unable to delete log format") + } + + delete(lfr.registry, name) + return nil +} + +// List names of registered log formats (sorted) +func (lfr *LogFormatRegistry) List() []string { + formats := make([]string, 0, len(lfr.registry)) + for f := range lfr.registry { + formats = append(formats, f) + } + sort.Strings(formats) + return formats +} + +// Freeze freezes the log format registry +func (lfr *LogFormatRegistry) Freeze() { + lfr.frozen = true +} +func init() { + // Text format is default klog format + logRegistry.Register(defaultLogFormat, nil) + logRegistry.Register(jsonLogFormat, json.JSONLogger) +} diff --git a/vendor/k8s.io/component-base/logs/sanitization/sanitization.go b/vendor/k8s.io/component-base/logs/sanitization/sanitization.go new file mode 100644 index 000000000..0d1b7d52f --- /dev/null +++ b/vendor/k8s.io/component-base/logs/sanitization/sanitization.go @@ -0,0 +1,69 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanitization + +import ( + "fmt" + + "k8s.io/component-base/logs/datapol" +) + +const ( + datapolMsgFmt = "Log message has been redacted. Log argument #%d contains: %v" + datapolMsg = "Log message has been redacted." 
+) + +// SanitizingFilter implements the LogFilter interface from klog with a set of functions that inspects the arguments with the datapol library +type SanitizingFilter struct{} + +// Filter is the filter function for the non-formatting logging functions of klog. +func (sf *SanitizingFilter) Filter(args []interface{}) []interface{} { + for i, v := range args { + types := datapol.Verify(v) + if len(types) > 0 { + return []interface{}{fmt.Sprintf(datapolMsgFmt, i, types)} + } + } + return args +} + +// FilterF is the filter function for the formatting logging functions of klog +func (sf *SanitizingFilter) FilterF(fmt string, args []interface{}) (string, []interface{}) { + for i, v := range args { + types := datapol.Verify(v) + if len(types) > 0 { + return datapolMsgFmt, []interface{}{i, types} + } + } + return fmt, args + +} + +// FilterS is the filter for the structured logging functions of klog. +func (sf *SanitizingFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) { + for i, v := range keysAndValues { + types := datapol.Verify(v) + if len(types) > 0 { + if i%2 == 0 { + return datapolMsg, []interface{}{"key_index", i, "types", types} + } + // since we scanned linearly we can safely log the key. + return datapolMsg, []interface{}{"key", keysAndValues[i-1], "types", types} + } + } + return msg, keysAndValues +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go index 1cf788f8c..bb1f69627 100644 --- a/vendor/k8s.io/component-base/metrics/metric.go +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -188,9 +188,6 @@ func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { // no-op vecs for convenience var noopCounterVec = &prometheus.CounterVec{} var noopHistogramVec = &prometheus.HistogramVec{} - -// lint:ignore U1000 Keep it for future use -var noopSummaryVec = &prometheus.SummaryVec{} var noopGaugeVec = &prometheus.GaugeVec{} var noopObserverVec = &noopObserverVector{} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go index b20516ec9..8dde45881 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -43,6 +43,12 @@ func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { klog.Errorf("Could not get process start time, %v", err) start = float64(time.Now().Unix()) } + // processStartTime is a lazy metric which only get initialized after registered. + // so we have to explicitly create it before setting the label value. Otherwise + // it is a noop. + if !processStartTime.IsCreated() { + processStartTime.initializeMetric() + } processStartTime.WithLabelValues().Set(start) return registrationFunc(processStartTime) } @@ -54,7 +60,7 @@ func getProcessStart() (float64, error) { return 0, err } - if stat, err := p.NewStat(); err == nil { + if stat, err := p.Stat(); err == nil { return stat.StartTime() } return 0, err diff --git a/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go new file mode 100644 index 000000000..a0192acb0 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go @@ -0,0 +1,130 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
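// A hedged sketch of the SanitizingFilter above in action; the Token type is
// illustrative, and the redaction text comes from the datapolMsgFmt constant.
package main

import (
	"k8s.io/component-base/logs/sanitization"
	"k8s.io/klog/v2"
)

type Token struct {
	Value string `datapolicy:"token"`
}

func main() {
	klog.SetLogFilter(&sanitization.SanitizingFilter{})
	// The message body is replaced with roughly:
	// "Log message has been redacted. Log argument #0 contains: [token]"
	klog.Info(Token{Value: "s3cr3t"})
	klog.Flush()
}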
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "k8s.io/client-go/util/workqueue" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +// Metrics subsystem and keys used by the workqueue. +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = k8smetrics.NewCounterVec(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = k8smetrics.NewHistogramVec(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested.", + Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + workDuration = k8smetrics.NewHistogramVec(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + unfinished = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = k8smetrics.NewCounterVec(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) + + metrics = []k8smetrics.Registerable{ + depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries, + } +) + +type prometheusMetricsProvider struct { +} + +func init() { + for _, m := range metrics { + legacyregistry.MustRegister(m) + } + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name) +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/metrics.go b/vendor/k8s.io/component-base/metrics/testutil/metrics.go new file mode 100644 index 000000000..389655012 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/metrics.go @@ -0,0 +1,368 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + "math" + "reflect" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "k8s.io/component-base/metrics" +) + +var ( + // MetricNameLabel is label under which model.Sample stores metric name + MetricNameLabel model.LabelName = model.MetricNameLabel + // QuantileLabel is label under which model.Sample stores latency quantile value + QuantileLabel model.LabelName = model.QuantileLabel +) + +// Metrics is generic metrics for other specific metrics +type Metrics map[string]model.Samples + +// Equal returns true if all metrics are the same as the arguments. 
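// A hedged sketch of the blank-import pattern described in the workqueue
// metrics package comment above; the queue name "sketch" is illustrative.
package main

import (
	"k8s.io/client-go/util/workqueue"

	_ "k8s.io/component-base/metrics/prometheus/workqueue" // init() registers the provider
)

func main() {
	q := workqueue.NewNamed("sketch")
	q.Add("item")
	q.ShutDown()
	// workqueue_depth{name="sketch"}, workqueue_adds_total{name="sketch"}, etc.
	// are now collected through k8s.io/component-base/metrics/legacyregistry.
}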
+func (m *Metrics) Equal(o Metrics) bool { + var leftKeySet []string + var rightKeySet []string + for k := range *m { + leftKeySet = append(leftKeySet, k) + } + for k := range o { + rightKeySet = append(rightKeySet, k) + } + if !reflect.DeepEqual(leftKeySet, rightKeySet) { + return false + } + for _, k := range leftKeySet { + if !(*m)[k].Equal(o[k]) { + return false + } + } + return true +} + +// NewMetrics returns new metrics which are initialized. +func NewMetrics() Metrics { + result := make(Metrics) + return result +} + +// ParseMetrics parses Metrics from data returned from prometheus endpoint +func ParseMetrics(data string, output *Metrics) error { + dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText) + decoder := expfmt.SampleDecoder{ + Dec: dec, + Opts: &expfmt.DecodeOptions{}, + } + + for { + var v model.Vector + if err := decoder.Decode(&v); err != nil { + if err == io.EOF { + // Expected loop termination condition. + return nil + } + continue + } + for _, metric := range v { + name := string(metric.Metric[MetricNameLabel]) + (*output)[name] = append((*output)[name], metric) + } + } +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +func TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + var textParser expfmt.TextParser + return textParser.TextToMetricFamilies(in) +} + +// PrintSample returns formatted representation of metric Sample +func PrintSample(sample *model.Sample) string { + buf := make([]string, 0) + // Id is a VERY special label. For 'normal' container it's useless, but it's necessary + // for 'system' containers (e.g. /docker-daemon, /kubelet, etc.). We know if that's the + // case by checking if there's a label "kubernetes_container_name" present. It's hacky + // but it works... + _, normalContainer := sample.Metric["kubernetes_container_name"] + for k, v := range sample.Metric { + if strings.HasPrefix(string(k), "__") { + continue + } + + if string(k) == "id" && normalContainer { + continue + } + buf = append(buf, fmt.Sprintf("%v=%v", string(k), v)) + } + return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value) +} + +// ComputeHistogramDelta computes the change in histogram metric for a selected label. 
+// Results are stored in after samples +func ComputeHistogramDelta(before, after model.Samples, label model.LabelName) { + beforeSamplesMap := make(map[string]*model.Sample) + for _, bSample := range before { + beforeSamplesMap[makeKey(bSample.Metric[label], bSample.Metric["le"])] = bSample + } + for _, aSample := range after { + if bSample, found := beforeSamplesMap[makeKey(aSample.Metric[label], aSample.Metric["le"])]; found { + aSample.Value = aSample.Value - bSample.Value + } + } +} + +func makeKey(a, b model.LabelValue) string { + return string(a) + "___" + string(b) +} + +// GetMetricValuesForLabel returns value of metric for a given dimension +func GetMetricValuesForLabel(ms Metrics, metricName, label string) map[string]int64 { + samples, found := ms[metricName] + result := make(map[string]int64, len(samples)) + if !found { + return result + } + for _, sample := range samples { + count := int64(sample.Value) + dimensionName := string(sample.Metric[model.LabelName(label)]) + result[dimensionName] = count + } + return result +} + +// ValidateMetrics verifies if every sample of metric has all expected labels +func ValidateMetrics(metrics Metrics, metricName string, expectedLabels ...string) error { + samples, ok := metrics[metricName] + if !ok { + return fmt.Errorf("metric %q was not found in metrics", metricName) + } + for _, sample := range samples { + for _, l := range expectedLabels { + if _, ok := sample.Metric[model.LabelName(l)]; !ok { + return fmt.Errorf("metric %q is missing label %q, sample: %q", metricName, l, sample.String()) + } + } + } + return nil +} + +// Histogram wraps prometheus histogram DTO (data transfer object) +type Histogram struct { + *dto.Histogram +} + +// GetHistogramFromGatherer collects a metric from a gatherer implementing k8s.io/component-base/metrics.Gatherer interface. +// Used only for testing purposes where we need to gather metrics directly from a running binary (without metrics endpoint). +func GetHistogramFromGatherer(gatherer metrics.Gatherer, metricName string) (Histogram, error) { + var metricFamily *dto.MetricFamily + m, err := gatherer.Gather() + if err != nil { + return Histogram{}, err + } + for _, mFamily := range m { + if mFamily.GetName() == metricName { + metricFamily = mFamily + break + } + } + + if metricFamily == nil { + return Histogram{}, fmt.Errorf("metric %q not found", metricName) + } + + if metricFamily.GetMetric() == nil { + return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + } + + if len(metricFamily.GetMetric()) == 0 { + return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + } + + return Histogram{ + // Histograms are stored under the first index (based on observation). + // Given there's only one histogram registered per each metric name, accessing + // the first index is sufficient. 
+ metricFamily.GetMetric()[0].GetHistogram(), + }, nil +} + +func uint64Ptr(u uint64) *uint64 { + return &u +} + +// Bucket of a histogram +type bucket struct { + upperBound float64 + count float64 +} + +func bucketQuantile(q float64, buckets []bucket) float64 { + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + + if len(buckets) < 2 { + return math.NaN() + } + + rank := q * buckets[len(buckets)-1].count + b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) + + if b == 0 { + return buckets[0].upperBound * (rank / buckets[0].count) + } + + if b == len(buckets)-1 && math.IsInf(buckets[b].upperBound, 1) { + return buckets[len(buckets)-2].upperBound + } + + // linear approximation of b-th bucket + brank := rank - buckets[b-1].count + bSize := buckets[b].upperBound - buckets[b-1].upperBound + bCount := buckets[b].count - buckets[b-1].count + + return buckets[b-1].upperBound + bSize*(brank/bCount) +} + +// Quantile computes q-th quantile of a cumulative histogram. +// It's expected the histogram is valid (by calling Validate) +func (hist *Histogram) Quantile(q float64) float64 { + var buckets []bucket + + for _, bckt := range hist.Bucket { + buckets = append(buckets, bucket{ + count: float64(bckt.GetCumulativeCount()), + upperBound: bckt.GetUpperBound(), + }) + } + + if len(buckets) == 0 || buckets[len(buckets)-1].upperBound != math.Inf(+1) { + // The list of buckets in dto.Histogram doesn't include the final +Inf bucket, so we + // add it here for the reset of the samples. + buckets = append(buckets, bucket{ + count: float64(hist.GetSampleCount()), + upperBound: math.Inf(+1), + }) + } + + return bucketQuantile(q, buckets) +} + +// Average computes histogram's average value +func (hist *Histogram) Average() float64 { + return hist.GetSampleSum() / float64(hist.GetSampleCount()) +} + +// Clear clears all fields of the wrapped histogram +func (hist *Histogram) Clear() { + if hist.SampleCount != nil { + *hist.SampleCount = 0 + } + if hist.SampleSum != nil { + *hist.SampleSum = 0 + } + for _, b := range hist.Bucket { + if b.CumulativeCount != nil { + *b.CumulativeCount = 0 + } + } +} + +// Validate makes sure the wrapped histogram has all necessary fields set and with valid values. 
+func (hist *Histogram) Validate() error { + if hist.SampleCount == nil || hist.GetSampleCount() == 0 { + return fmt.Errorf("nil or empty histogram SampleCount") + } + + if hist.SampleSum == nil || hist.GetSampleSum() == 0 { + return fmt.Errorf("nil or empty histogram SampleSum") + } + + for _, bckt := range hist.Bucket { + if bckt == nil { + return fmt.Errorf("empty histogram bucket") + } + if bckt.UpperBound == nil || bckt.GetUpperBound() < 0 { + return fmt.Errorf("nil or negative histogram bucket UpperBound") + } + } + + return nil +} + +// GetGaugeMetricValue extract metric value from GaugeMetric +func GetGaugeMetricValue(m metrics.GaugeMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Gauge.GetValue(), nil +} + +// GetCounterMetricValue extract metric value from CounterMetric +func GetCounterMetricValue(m metrics.CounterMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.(metrics.Metric).Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Counter.GetValue(), nil +} + +// GetHistogramMetricValue extract sum of all samples from ObserverMetric +func GetHistogramMetricValue(m metrics.ObserverMetric) (float64, error) { + metricProto := &dto.Metric{} + if err := m.(metrics.Metric).Write(metricProto); err != nil { + return 0, fmt.Errorf("error writing m: %v", err) + } + return metricProto.Histogram.GetSampleSum(), nil +} + +// LabelsMatch returns true if metric has all expected labels otherwise false +func LabelsMatch(metric *dto.Metric, labelFilter map[string]string) bool { + metricLabels := map[string]string{} + + for _, labelPair := range metric.Label { + metricLabels[labelPair.GetName()] = labelPair.GetValue() + } + + // length comparison then match key to values in the maps + if len(labelFilter) > len(metricLabels) { + return false + } + + for labelName, labelValue := range labelFilter { + if value, ok := metricLabels[labelName]; !ok || value != labelValue { + return false + } + } + + return true +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/promlint.go b/vendor/k8s.io/component-base/metrics/testutil/promlint.go new file mode 100644 index 000000000..33b83f05c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/promlint.go @@ -0,0 +1,156 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + "strings" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint" +) + +// exceptionMetrics is an exception list of metrics which violates promlint rules. +// +// The original entries come from the existing metrics when we introduce promlint. +// We setup this list for allow and not fail on the current violations. +// Generally speaking, you need to fix the problem for a new metric rather than add it into the list. 
+var exceptionMetrics = []string{ + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/egressselector + "apiserver_egress_dialer_dial_failure_count", // counter metrics should have "_total" suffix + + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz + "apiserver_request_total", // label names should be written in 'snake_case' not 'camelCase' + + // k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters + "authenticated_user_requests", // counter metrics should have "_total" suffix + "authentication_attempts", // counter metrics should have "_total" suffix + + // kube-apiserver + "aggregator_openapi_v2_regeneration_count", + "apiserver_admission_step_admission_duration_seconds_summary", + "apiserver_current_inflight_requests", + "apiserver_longrunning_gauge", + "get_token_count", + "get_token_fail_count", + "ssh_tunnel_open_count", + "ssh_tunnel_open_fail_count", + + // kube-controller-manager + "attachdetach_controller_forced_detaches", + "authenticated_user_requests", + "authentication_attempts", + "get_token_count", + "get_token_fail_count", + "node_collector_evictions_number", + + // k8s.io/kubernetes/pkg/kubelet/server/stats + // The two metrics have been deprecated and will be removed in release v1.20+. + "container_cpu_usage_seconds_total", // non-counter metrics should not have "_total" suffix + "node_cpu_usage_seconds_total", // non-counter metrics should not have "_total" suffix +} + +// A Problem is an issue detected by a Linter. +type Problem promlint.Problem + +func (p *Problem) String() string { + return fmt.Sprintf("%s:%s", p.Metric, p.Text) +} + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + promLinter *promlint.Linter +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + promProblems, err := l.promLinter.Lint() + if err != nil { + return nil, err + } + + // Ignore problems those in exception list + problems := make([]Problem, 0, len(promProblems)) + for i := range promProblems { + if !l.shouldIgnore(promProblems[i].Metric) { + problems = append(problems, Problem(promProblems[i])) + } + } + + return problems, nil +} + +// shouldIgnore returns true if metric in the exception list, otherwise returns false. +func (l *Linter) shouldIgnore(metricName string) bool { + for i := range exceptionMetrics { + if metricName == exceptionMetrics[i] { + return true + } + } + + return false +} + +// NewPromLinter creates a new Linter that reads an input stream of Prometheus metrics. +// Only the text exposition format is supported. +func NewPromLinter(r io.Reader) *Linter { + return &Linter{ + promLinter: promlint.New(r), + } +} + +func mergeProblems(problems []Problem) string { + var problemsMsg []string + + for index := range problems { + problemsMsg = append(problemsMsg, problems[index].String()) + } + + return strings.Join(problemsMsg, ",") +} + +// shouldIgnore returns true if metric in the exception list, otherwise returns false. +func shouldIgnore(metricName string) bool { + for i := range exceptionMetrics { + if metricName == exceptionMetrics[i] { + return true + } + } + + return false +} + +// getLintError will ignore the metrics in exception list and converts lint problem to error. 
+func getLintError(problems []promlint.Problem) error { + var filteredProblems []Problem + for _, problem := range problems { + if shouldIgnore(problem.Metric) { + continue + } + + filteredProblems = append(filteredProblems, Problem(problem)) + } + + if len(filteredProblems) == 0 { + return nil + } + + return fmt.Errorf("lint error: %s", mergeProblems(filteredProblems)) +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/testutil.go b/vendor/k8s.io/component-base/metrics/testutil/testutil.go new file mode 100644 index 000000000..439045989 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/testutil/testutil.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "io" + + "github.com/prometheus/client_golang/prometheus/testutil" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + "k8s.io/component-base/metrics" +) + +// CollectAndCompare registers the provided Collector with a newly created +// pedantic Registry. It then does the same as GatherAndCompare, gathering the +// metrics from the pedantic Registry. +func CollectAndCompare(c metrics.Collector, expected io.Reader, metricNames ...string) error { + lintProblems, err := testutil.CollectAndLint(c, metricNames...) + if err != nil { + return err + } + if err := getLintError(lintProblems); err != nil { + return err + } + + return testutil.CollectAndCompare(c, expected, metricNames...) +} + +// GatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +func GatherAndCompare(g metrics.Gatherer, expected io.Reader, metricNames ...string) error { + lintProblems, err := testutil.GatherAndLint(g, metricNames...) + if err != nil { + return err + } + if err := getLintError(lintProblems); err != nil { + return err + } + + return testutil.GatherAndCompare(g, expected, metricNames...) +} + +// CustomCollectAndCompare registers the provided StableCollector with a newly created +// registry. It then does the same as GatherAndCompare, gathering the +// metrics from the pedantic Registry. +func CustomCollectAndCompare(c metrics.StableCollector, expected io.Reader, metricNames ...string) error { + registry := metrics.NewKubeRegistry() + registry.CustomMustRegister(c) + + return GatherAndCompare(registry, expected, metricNames...) +} + +// NewFakeKubeRegistry creates a fake `KubeRegistry` that takes the input version as `build in version`. +// It should only be used in testing scenario especially for the deprecated metrics. +// The input version format should be `major.minor.patch`, e.g. '1.18.0'. 
+func NewFakeKubeRegistry(ver string) metrics.KubeRegistry { + backup := metrics.BuildVersion + defer func() { + metrics.BuildVersion = backup + }() + + metrics.BuildVersion = func() apimachineryversion.Info { + return apimachineryversion.Info{ + GitVersion: fmt.Sprintf("v%s-alpha+1.12345", ver), + } + } + + return metrics.NewKubeRegistry() +} diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 2f9f6f0d8..64d29622e 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -27,7 +27,7 @@ Historical context is available here: How to use klog =============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"` - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) @@ -35,6 +35,10 @@ How to use klog **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. +### Coexisting with klog/v2 + +See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. + ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md new file mode 100644 index 000000000..2083d44cd --- /dev/null +++ b/vendor/k8s.io/klog/v2/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Security Announcements + +Join the [kubernetes-security-announce] group for security and vulnerability announcements. + +You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. + +## Reporting a Vulnerability + +Instructions for reporting a vulnerability can be found on the +[Kubernetes Security and Disclosure Information] page. + +## Supported Versions + +Information about supported Kubernetes versions can be found on the +[Kubernetes version and version skew support policy] page on the Kubernetes website. 
+ +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 49f1f2dd2..23cced625 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -413,6 +413,7 @@ func init() { logging.skipHeaders = false logging.addDirHeader = false logging.skipLogHeaders = false + logging.oneOutput = false go logging.flushDaemon() } @@ -432,6 +433,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -505,6 +507,12 @@ type loggingT struct { // If set, all output will be redirected unconditionally to the provided logr.Logger logr logr.Logger + + // If true, messages will not be propagated to lower severity log levels + oneOutput bool + + // If set, all output will be filtered through the filter. + filter LogFilter } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -687,7 +695,7 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { +func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -695,15 +703,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprintln(buf, args...) l.output(s, logr, buf, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, args ...interface{}) { - l.printDepth(s, logr, 1, args...) +func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logr, filter, 1, args...) 
} -func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -711,6 +722,9 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -718,7 +732,7 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.output(s, logr, buf, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -726,6 +740,9 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + format, args = filter.FilterF(format, args) + } fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -736,7 +753,7 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -744,6 +761,9 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -752,18 +772,24 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Error(err, msg, keysAndValues) + loggr.Error(err, msg, keysAndValues...) return } l.printS(err, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. 
-func (l *loggingT) infoS(loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Info(msg, keysAndValues) + loggr.Info(msg, keysAndValues...) return } l.printS(nil, msg, keysAndValues...) @@ -785,7 +811,7 @@ func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { } else { s = errorLog } - l.printDepth(s, logging.logr, 2, b) + l.printDepth(s, logging.logr, nil, 2, b) } const missingValue = "(MISSING)" @@ -919,18 +945,22 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, } } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) + if l.oneOutput { + l.file[s].Write(data) + } else { + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } } @@ -1077,11 +1107,19 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { } var err error sb.file, _, err = create(severityName[sb.sev], now, startup) - sb.nbytes = 0 if err != nil { return err } - + if startup { + fileInfo, err := sb.file.Stat() + if err != nil { + return fmt.Errorf("file stat could not get fileinfo: %v", err) + } + // init file size + sb.nbytes = uint64(fileInfo.Size()) + } else { + sb.nbytes = 0 + } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) if sb.logger.skipLogHeaders { @@ -1197,7 +1235,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, file, line, true, text) + logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) return len(b), nil } @@ -1232,13 +1270,14 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr logr.Logger + filter LogFilter } func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { - return Verbose{b, nil} + return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level))} + return Verbose{b, logging.logr.V(int(level)), logging.filter} } // V reports whether verbosity at the call site is at least the requested level. @@ -1265,7 +1304,7 @@ func V(level Level) Verbose { return newVerbose(level, true) } - // It's off globally but it vmodule may still be set. + // It's off globally but vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field @@ -1296,7 +1335,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, args...) + logging.print(infoLog, v.logr, v.filter, args...) } } @@ -1304,7 +1343,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. 
func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, args...) + logging.println(infoLog, v.logr, v.filter, args...) } } @@ -1312,7 +1351,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, format, args...) + logging.printf(infoLog, v.logr, v.filter, format, args...) } } @@ -1320,14 +1359,14 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, msg, keysAndValues...) + logging.infoS(v.logr, v.filter, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, msg, args...) + logging.errorS(err, v.logr, v.filter, msg, args...) } } @@ -1335,32 +1374,32 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, msg, keysAndValues...) + logging.errorS(err, v.logr, v.filter, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, args...) + logging.print(infoLog, logging.logr, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, depth, args...) + logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, args...) + logging.println(infoLog, logging.logr, logging.filter, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, format, args...) + logging.printf(infoLog, logging.logr, logging.filter, format, args...) } // InfoS structured logs to the INFO log. @@ -1372,55 +1411,55 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, msg, keysAndValues...) + logging.infoS(logging.logr, logging.filter, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, args...) + logging.print(warningLog, logging.logr, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, depth, args...) + logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) 
} // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, args...) + logging.println(warningLog, logging.logr, logging.filter, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, format, args...) + logging.printf(warningLog, logging.logr, logging.filter, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, args...) + logging.print(errorLog, logging.logr, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, depth, args...) + logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, args...) + logging.println(errorLog, logging.logr, logging.filter, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, format, args...) + logging.printf(errorLog, logging.logr, logging.filter, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1433,34 +1472,34 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, msg, keysAndValues...) + logging.errorS(err, logging.logr, logging.filter, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). 
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1471,27 +1510,42 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) +} + +// LogFilter is a collection of functions that can filter all logging calls, +// e.g. for sanitization of arguments and prevent accidental leaking of secrets. +type LogFilter interface { + Filter(args []interface{}) []interface{} + FilterF(format string, args []interface{}) (string, []interface{}) + FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) +} + +func SetLogFilter(filter LogFilter) { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.filter = filter } // ObjectRef references a kubernetes object diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/doc.go b/vendor/k8s.io/kube-openapi/pkg/builder/doc.go new file mode 100644 index 000000000..c3109067f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder contains code to generate OpenAPI discovery spec (which +// initial version of it also known as Swagger 2.0). 
+// For more details: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md +package builder diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go new file mode 100644 index 000000000..d04080d57 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -0,0 +1,445 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + restful "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/util" +) + +const ( + OpenAPIVersion = "2.0" +) + +type openAPI struct { + config *common.Config + swagger *spec.Swagger + protocolList []string + definitions map[string]common.OpenAPIDefinition +} + +// BuildOpenAPISpec builds OpenAPI spec given a list of webservices (containing routes) and common.Config to customize it. +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec.Swagger, error) { + o := newOpenAPI(config) + err := o.buildPaths(webServices) + if err != nil { + return nil, err + } + return o.finalizeSwagger() +} + +// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. +func BuildOpenAPIDefinitionsForResource(model interface{}, config *common.Config) (*spec.Definitions, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. + // All the models created for this resource get added to o.swagger.Definitions + _, err := o.toSchema(util.GetCanonicalTypeName(model)) + if err != nil { + return nil, err + } + swagger, err := o.finalizeSwagger() + if err != nil { + return nil, err + } + return &swagger.Definitions, nil +} + +// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the +// passed type names. +func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (*spec.Swagger, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. + // All the models created for this resource get added to o.swagger.Definitions + for _, name := range names { + _, err := o.toSchema(name) + if err != nil { + return nil, err + } + } + return o.finalizeSwagger() +} + +// newOpenAPI sets up the openAPI object so we can build the spec. 
+func newOpenAPI(config *common.Config) openAPI { + o := openAPI{ + config: config, + swagger: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Swagger: OpenAPIVersion, + Definitions: spec.Definitions{}, + Responses: config.ResponseDefinitions, + Paths: &spec.Paths{Paths: map[string]spec.PathItem{}}, + Info: config.Info, + }, + }, + } + if o.config.GetOperationIDAndTags == nil { + o.config.GetOperationIDAndTags = func(r *restful.Route) (string, []string, error) { + return r.Operation, nil, nil + } + } + if o.config.GetDefinitionName == nil { + o.config.GetDefinitionName = func(name string) (string, spec.Extensions) { + return name[strings.LastIndex(name, "/")+1:], nil + } + } + o.definitions = o.config.GetDefinitions(func(name string) spec.Ref { + defName, _ := o.config.GetDefinitionName(name) + return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(defName)) + }) + if o.config.CommonResponses == nil { + o.config.CommonResponses = map[int]spec.Response{} + } + return o +} + +// finalizeSwagger is called after the spec is built and returns the final spec. +// NOTE: finalizeSwagger also make changes to the final spec, as specified in the config. +func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { + if o.config.SecurityDefinitions != nil { + o.swagger.SecurityDefinitions = *o.config.SecurityDefinitions + o.swagger.Security = o.config.DefaultSecurity + } + if o.config.PostProcessSpec != nil { + var err error + o.swagger, err = o.config.PostProcessSpec(o.swagger) + if err != nil { + return nil, err + } + } + + return o.swagger, nil +} + +func (o *openAPI) buildDefinitionRecursively(name string) error { + uniqueName, extensions := o.config.GetDefinitionName(name) + if _, ok := o.swagger.Definitions[uniqueName]; ok { + return nil + } + if item, ok := o.definitions[name]; ok { + schema := spec.Schema{ + VendorExtensible: item.Schema.VendorExtensible, + SchemaProps: item.Schema.SchemaProps, + SwaggerSchemaProps: item.Schema.SwaggerSchemaProps, + } + if extensions != nil { + if schema.Extensions == nil { + schema.Extensions = spec.Extensions{} + } + for k, v := range extensions { + schema.Extensions[k] = v + } + } + if v, ok := item.Schema.Extensions[common.ExtensionV2Schema]; ok { + if v2Schema, isOpenAPISchema := v.(spec.Schema); isOpenAPISchema { + schema = v2Schema + } + } + o.swagger.Definitions[uniqueName] = schema + for _, v := range item.Dependencies { + if err := o.buildDefinitionRecursively(v); err != nil { + return err + } + } + } else { + return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name) + } + return nil +} + +// buildDefinitionForType build a definition for a given type and return a referable name to its definition. +// This is the main function that keep track of definitions used in this spec and is depend on code generated +// by k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen. +func (o *openAPI) buildDefinitionForType(name string) (string, error) { + if err := o.buildDefinitionRecursively(name); err != nil { + return "", err + } + defName, _ := o.config.GetDefinitionName(name) + return "#/definitions/" + common.EscapeJsonPointer(defName), nil +} + +// buildPaths builds OpenAPI paths using go-restful's web services. 
+func (o *openAPI) buildPaths(webServices []*restful.WebService) error { + pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes) + duplicateOpId := make(map[string]string) + for _, w := range webServices { + rootPath := w.RootPath() + if pathsToIgnore.HasPrefix(rootPath) { + continue + } + commonParams, err := o.buildParameters(w.PathParameters()) + if err != nil { + return err + } + for path, routes := range groupRoutesByPath(w.Routes()) { + // go-swagger has special variable definition {$NAME:*} that can only be + // used at the end of the path and it is not recognized by OpenAPI. + if strings.HasSuffix(path, ":*}") { + path = path[:len(path)-3] + "}" + } + if pathsToIgnore.HasPrefix(path) { + continue + } + // Aggregating common parameters make API spec (and generated clients) simpler + inPathCommonParamsMap, err := o.findCommonParameters(routes) + if err != nil { + return err + } + pathItem, exists := o.swagger.Paths.Paths[path] + if exists { + return fmt.Errorf("duplicate webservice route has been found for path: %v", path) + } + pathItem = spec.PathItem{ + PathItemProps: spec.PathItemProps{ + Parameters: make([]spec.Parameter, 0), + }, + } + // add web services's parameters as well as any parameters appears in all ops, as common parameters + pathItem.Parameters = append(pathItem.Parameters, commonParams...) + for _, p := range inPathCommonParamsMap { + pathItem.Parameters = append(pathItem.Parameters, p) + } + sortParameters(pathItem.Parameters) + for _, route := range routes { + op, err := o.buildOperations(route, inPathCommonParamsMap) + sortParameters(op.Parameters) + if err != nil { + return err + } + dpath, exists := duplicateOpId[op.ID] + if exists { + return fmt.Errorf("duplicate Operation ID %v for path %v and %v", op.ID, dpath, path) + } else { + duplicateOpId[op.ID] = path + } + switch strings.ToUpper(route.Method) { + case "GET": + pathItem.Get = op + case "POST": + pathItem.Post = op + case "HEAD": + pathItem.Head = op + case "PUT": + pathItem.Put = op + case "DELETE": + pathItem.Delete = op + case "OPTIONS": + pathItem.Options = op + case "PATCH": + pathItem.Patch = op + } + } + o.swagger.Paths.Paths[path] = pathItem + } + } + return nil +} + +// buildOperations builds operations for each webservice path +func (o *openAPI) buildOperations(route restful.Route, inPathCommonParamsMap map[interface{}]spec.Parameter) (ret *spec.Operation, err error) { + ret = &spec.Operation{ + OperationProps: spec.OperationProps{ + Description: route.Doc, + Consumes: route.Consumes, + Produces: route.Produces, + Schemes: o.config.ProtocolList, + Responses: &spec.Responses{ + ResponsesProps: spec.ResponsesProps{ + StatusCodeResponses: make(map[int]spec.Response), + }, + }, + }, + } + for k, v := range route.Metadata { + if strings.HasPrefix(k, common.ExtensionPrefix) { + if ret.Extensions == nil { + ret.Extensions = spec.Extensions{} + } + ret.Extensions.Add(k, v) + } + } + if ret.ID, ret.Tags, err = o.config.GetOperationIDAndTags(&route); err != nil { + return ret, err + } + + // Build responses + for _, resp := range route.ResponseErrors { + ret.Responses.StatusCodeResponses[resp.Code], err = o.buildResponse(resp.Model, resp.Message) + if err != nil { + return ret, err + } + } + // If there is no response but a write sample, assume that write sample is an http.StatusOK response. 
+ if len(ret.Responses.StatusCodeResponses) == 0 && route.WriteSample != nil { + ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.WriteSample, "OK") + if err != nil { + return ret, err + } + } + for code, resp := range o.config.CommonResponses { + if _, exists := ret.Responses.StatusCodeResponses[code]; !exists { + ret.Responses.StatusCodeResponses[code] = resp + } + } + // If there is still no response, use default response provided. + if len(ret.Responses.StatusCodeResponses) == 0 { + ret.Responses.Default = o.config.DefaultResponse + } + + // Build non-common Parameters + ret.Parameters = make([]spec.Parameter, 0) + for _, param := range route.ParameterDocs { + if _, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)]; !isCommon { + openAPIParam, err := o.buildParameter(param.Data(), route.ReadSample) + if err != nil { + return ret, err + } + ret.Parameters = append(ret.Parameters, openAPIParam) + } + } + return ret, nil +} + +func (o *openAPI) buildResponse(model interface{}, description string) (spec.Response, error) { + schema, err := o.toSchema(util.GetCanonicalTypeName(model)) + if err != nil { + return spec.Response{}, err + } + return spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: description, + Schema: schema, + }, + }, nil +} + +func (o *openAPI) findCommonParameters(routes []restful.Route) (map[interface{}]spec.Parameter, error) { + commonParamsMap := make(map[interface{}]spec.Parameter, 0) + paramOpsCountByName := make(map[interface{}]int, 0) + paramNameKindToDataMap := make(map[interface{}]restful.ParameterData, 0) + for _, route := range routes { + routeParamDuplicateMap := make(map[interface{}]bool) + s := "" + for _, param := range route.ParameterDocs { + m, _ := json.Marshal(param.Data()) + s += string(m) + "\n" + key := mapKeyFromParam(param) + if routeParamDuplicateMap[key] { + msg, _ := json.Marshal(route.ParameterDocs) + return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Data().Name, string(msg), s) + } + routeParamDuplicateMap[key] = true + paramOpsCountByName[key]++ + paramNameKindToDataMap[key] = param.Data() + } + } + for key, count := range paramOpsCountByName { + paramData := paramNameKindToDataMap[key] + if count == len(routes) && paramData.Kind != restful.BodyParameterKind { + openAPIParam, err := o.buildParameter(paramData, nil) + if err != nil { + return commonParamsMap, err + } + commonParamsMap[key] = openAPIParam + } + } + return commonParamsMap, nil +} + +func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) { + if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" { + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{openAPIType}, + Format: openAPIFormat, + }, + }, nil + } else { + ref, err := o.buildDefinitionForType(name) + if err != nil { + return nil, err + } + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: spec.MustCreateRef(ref), + }, + }, nil + } +} + +func (o *openAPI) buildParameter(restParam restful.ParameterData, bodySample interface{}) (ret spec.Parameter, err error) { + ret = spec.Parameter{ + ParamProps: spec.ParamProps{ + Name: restParam.Name, + Description: restParam.Description, + Required: restParam.Required, + }, + } + switch restParam.Kind { + case restful.BodyParameterKind: + if bodySample != nil { + ret.In = "body" + ret.Schema, err = o.toSchema(util.GetCanonicalTypeName(bodySample)) + return ret, err + } else { + // There is not enough information in the body 
parameter to build the definition. + // Body parameter has a data type that is a short name but we need full package name + // of the type to create a definition. + return ret, fmt.Errorf("restful body parameters are not supported: %v", restParam.DataType) + } + case restful.PathParameterKind: + ret.In = "path" + if !restParam.Required { + return ret, fmt.Errorf("path parameters should be marked at required for parameter %v", restParam) + } + case restful.QueryParameterKind: + ret.In = "query" + case restful.HeaderParameterKind: + ret.In = "header" + case restful.FormParameterKind: + ret.In = "formData" + default: + return ret, fmt.Errorf("unknown restful operation kind : %v", restParam.Kind) + } + openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType) + if openAPIType == "" { + return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType) + } + ret.Type = openAPIType + ret.Format = openAPIFormat + ret.UniqueItems = !restParam.AllowMultiple + return ret, nil +} + +func (o *openAPI) buildParameters(restParam []*restful.Parameter) (ret []spec.Parameter, err error) { + ret = make([]spec.Parameter, len(restParam)) + for i, v := range restParam { + ret[i], err = o.buildParameter(v.Data(), nil) + if err != nil { + return ret, err + } + } + return ret, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/util.go b/vendor/k8s.io/kube-openapi/pkg/builder/util.go new file mode 100644 index 000000000..5e9a56a6b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/util.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "sort" + + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" +) + +type parameters []spec.Parameter + +func (s parameters) Len() int { return len(s) } +func (s parameters) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// byNameIn used in sorting parameters by Name and In fields. +type byNameIn struct { + parameters +} + +func (s byNameIn) Less(i, j int) bool { + return s.parameters[i].Name < s.parameters[j].Name || (s.parameters[i].Name == s.parameters[j].Name && s.parameters[i].In < s.parameters[j].In) +} + +// SortParameters sorts parameters by Name and In fields. 
+func sortParameters(p []spec.Parameter) { + sort.Sort(byNameIn{p}) +} + +func groupRoutesByPath(routes []restful.Route) map[string][]restful.Route { + pathToRoutes := make(map[string][]restful.Route) + for _, r := range routes { + pathToRoutes[r.Path] = append(pathToRoutes[r.Path], r) + } + return pathToRoutes +} + +func mapKeyFromParam(param *restful.Parameter) interface{} { + return struct { + Name string + Kind int + }{ + Name: param.Data().Name, + Kind: param.Data().Kind, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go new file mode 100644 index 000000000..40be34786 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -0,0 +1,208 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "net/http" + "strings" + + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" +) + +const ( + // TODO: Make this configurable. + ExtensionPrefix = "x-kubernetes-" + ExtensionV2Schema = ExtensionPrefix + "v2-schema" +) + +// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. +type OpenAPIDefinition struct { + Schema spec.Schema + Dependencies []string +} + +type ReferenceCallback func(path string) spec.Ref + +// GetOpenAPIDefinitions is collection of all definitions. +type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition + +// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, +// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See +// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when +// possible. +type OpenAPIDefinitionGetter interface { + OpenAPIDefinition() *OpenAPIDefinition +} + +type OpenAPIV3DefinitionGetter interface { + OpenAPIV3Definition() *OpenAPIDefinition +} + +type PathHandler interface { + Handle(path string, handler http.Handler) +} + +// Config is set of configuration for openAPI spec generation. +type Config struct { + // List of supported protocols such as https, http, etc. + ProtocolList []string + + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec.Response + + // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object + // that holds responses definitions that can be used across operations. This property does not define + // global responses for all operations. For more info please refer: + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields + ResponseDefinitions map[string]spec.Response + + // CommonResponses will be added as a response to all operation specs. 
This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]spec.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + GetDefinitions GetOpenAPIDefinitions + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + GetOperationIDAndTags func(r *restful.Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config + // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. + SecurityDefinitions *spec.SecurityDefinitions + + // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. + // For most cases, this will be list of acceptable definitions in SecurityDefinitions. + DefaultSecurity []map[string][]string +} + +type typeInfo struct { + name string + format string + zero interface{} +} + +var schemaTypeFormatMap = map[string]typeInfo{ + "uint": {"integer", "int32", 0.}, + "uint8": {"integer", "byte", 0.}, + "uint16": {"integer", "int32", 0.}, + "uint32": {"integer", "int64", 0.}, + "uint64": {"integer", "int64", 0.}, + "int": {"integer", "int32", 0.}, + "int8": {"integer", "byte", 0.}, + "int16": {"integer", "int32", 0.}, + "int32": {"integer", "int32", 0.}, + "int64": {"integer", "int64", 0.}, + "byte": {"integer", "byte", 0}, + "float64": {"number", "double", 0.}, + "float32": {"number", "float", 0.}, + "bool": {"boolean", "", false}, + "time.Time": {"string", "date-time", ""}, + "string": {"string", "", ""}, + "integer": {"integer", "", 0.}, + "number": {"number", "", 0.}, + "boolean": {"boolean", "", false}, + "[]byte": {"string", "byte", ""}, // base64 encoded characters + "interface{}": {"object", "", interface{}(nil)}, +} + +// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are +// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type +// comment (the comment that is added before type definition) will be lost. The spec will still have the property +// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so +// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple +// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. +// Example: +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // IntOrString documentation... +// type IntOrString { ... 
} +// +// Adding IntOrString to this function: +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } +// +// Implement OpenAPIDefinitionGetter for IntOrString: +// +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// ... +// definitions: +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } +// +func OpenAPITypeFormat(typeName string) (string, string) { + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return "", "" + } + return mapped.name, mapped.format +} + +// Returns the zero-value for the given type along with true if the type +// could be found. +func OpenAPIZeroValue(typeName string) (interface{}, bool) { + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return nil, false + } + return mapped.zero, true +} + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} + +func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition { + if main.Schema.Extensions == nil { + main.Schema.Extensions = make(map[string]interface{}) + } + main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema + return main +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/doc.go b/vendor/k8s.io/kube-openapi/pkg/common/doc.go new file mode 100644 index 000000000..2ba6d247b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package common holds shared code and types between open API code +// generator and spec generator. +package common diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go b/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go new file mode 100644 index 000000000..69646871c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler/default_pruning.go @@ -0,0 +1,208 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import "github.com/go-openapi/spec" + +// PruneDefaults remove all the defaults recursively from all the +// schemas in the definitions, and does not modify the definitions in +// place. 
+func PruneDefaults(definitions spec.Definitions) spec.Definitions { + definitionsCloned := false + for k, v := range definitions { + if s := PruneDefaultsSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + orig := definitions + definitions = make(spec.Definitions, len(orig)) + for k2, v2 := range orig { + definitions[k2] = v2 + } + } + definitions[k] = *s + } + } + return definitions +} + +// PruneDefaultsSchema remove all the defaults recursively from the +// schema in place. +func PruneDefaultsSchema(schema *spec.Schema) *spec.Schema { + if schema == nil { + return nil + } + + orig := schema + clone := func() { + if orig == schema { + schema = &spec.Schema{} + *schema = *orig + } + } + + if schema.Default != nil { + clone() + schema.Default = nil + } + + definitionsCloned := false + for k, v := range schema.Definitions { + if s := PruneDefaultsSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + clone() + schema.Definitions = make(spec.Definitions, len(orig.Definitions)) + for k2, v2 := range orig.Definitions { + schema.Definitions[k2] = v2 + } + } + schema.Definitions[k] = *s + } + } + + propertiesCloned := false + for k, v := range schema.Properties { + if s := PruneDefaultsSchema(&v); s != &v { + if !propertiesCloned { + propertiesCloned = true + clone() + schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) + for k2, v2 := range orig.Properties { + schema.Properties[k2] = v2 + } + } + schema.Properties[k] = *s + } + } + + patternPropertiesCloned := false + for k, v := range schema.PatternProperties { + if s := PruneDefaultsSchema(&v); s != &v { + if !patternPropertiesCloned { + patternPropertiesCloned = true + clone() + schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) + for k2, v2 := range orig.PatternProperties { + schema.PatternProperties[k2] = v2 + } + } + schema.PatternProperties[k] = *s + } + } + + dependenciesCloned := false + for k, v := range schema.Dependencies { + if s := PruneDefaultsSchema(v.Schema); s != v.Schema { + if !dependenciesCloned { + dependenciesCloned = true + clone() + schema.Dependencies = make(spec.Dependencies, len(orig.Dependencies)) + for k2, v2 := range orig.Dependencies { + schema.Dependencies[k2] = v2 + } + } + v.Schema = s + schema.Dependencies[k] = v + } + } + + allOfCloned := false + for i := range schema.AllOf { + if s := PruneDefaultsSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { + if !allOfCloned { + allOfCloned = true + clone() + schema.AllOf = make([]spec.Schema, len(orig.AllOf)) + copy(schema.AllOf, orig.AllOf) + } + schema.AllOf[i] = *s + } + } + + anyOfCloned := false + for i := range schema.AnyOf { + if s := PruneDefaultsSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { + if !anyOfCloned { + anyOfCloned = true + clone() + schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) + copy(schema.AnyOf, orig.AnyOf) + } + schema.AnyOf[i] = *s + } + } + + oneOfCloned := false + for i := range schema.OneOf { + if s := PruneDefaultsSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { + if !oneOfCloned { + oneOfCloned = true + clone() + schema.OneOf = make([]spec.Schema, len(orig.OneOf)) + copy(schema.OneOf, orig.OneOf) + } + schema.OneOf[i] = *s + } + } + + if schema.Not != nil { + if s := PruneDefaultsSchema(schema.Not); s != schema.Not { + clone() + schema.Not = s + } + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + if s := PruneDefaultsSchema(schema.AdditionalProperties.Schema); s != 
schema.AdditionalProperties.Schema { + clone() + schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalProperties.Allows} + } + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + if s := PruneDefaultsSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { + clone() + schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} + } + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + if s := PruneDefaultsSchema(schema.Items.Schema); s != schema.Items.Schema { + clone() + schema.Items = &spec.SchemaOrArray{Schema: s} + } + } else { + itemsCloned := false + for i := range schema.Items.Schemas { + if s := PruneDefaultsSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { + if !itemsCloned { + clone() + schema.Items = &spec.SchemaOrArray{ + Schemas: make([]spec.Schema, len(orig.Items.Schemas)), + } + itemsCloned = true + copy(schema.Items.Schemas, orig.Items.Schemas) + } + schema.Items.Schemas[i] = *s + } + } + } + } + + return schema +} diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go new file mode 100644 index 000000000..7cd1bbd0f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -0,0 +1,267 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "bytes" + "compress/gzip" + "crypto/sha512" + "fmt" + "mime" + "net/http" + "sync" + "time" + + "github.com/NYTimes/gziphandler" + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "github.com/golang/protobuf/proto" + "github.com/googleapis/gnostic/compiler" + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + jsoniter "github.com/json-iterator/go" + "github.com/munnerz/goautoneg" + "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/common" +) + +const ( + jsonExt = ".json" + + mimeJson = "application/json" + // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. + mimePb = "application/com.github.googleapis.gnostic.OpenAPIv2@68f4ded+protobuf" + mimePbGz = "application/x-gzip" +) + +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // rwMutex protects All members of this service. + rwMutex sync.RWMutex + + lastModified time.Time + + specBytes []byte + specPb []byte + specPbGz []byte + + specBytesETag string + specPbETag string + specPbGzETag string +} + +func init() { + mime.AddExtensionType(".json", mimeJson) + mime.AddExtensionType(".pb-v1", mimePb) + mime.AddExtensionType(".gz", mimePbGz) +} + +func computeETag(data []byte) string { + return fmt.Sprintf("\"%X\"", sha512.Sum512(data)) +} + +// NewOpenAPIService builds an OpenAPIService starting with the given spec. 
+func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { + o := &OpenAPIService{} + if err := o.UpdateSpec(spec); err != nil { + return nil, err + } + return o, nil +} + +func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specBytes, o.specBytesETag, o.lastModified +} + +func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specPb, o.specPbETag, o.lastModified +} + +func (o *OpenAPIService) getSwaggerPbGzBytes() ([]byte, string, time.Time) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + return o.specPbGz, o.specPbGzETag, o.lastModified +} + +func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) { + specBytes, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(openapiSpec) + if err != nil { + return err + } + var json map[string]interface{} + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(specBytes, &json); err != nil { + return err + } + specPb, err := ToProtoBinary(json) + if err != nil { + return err + } + specPbGz := toGzip(specPb) + + specBytesETag := computeETag(specBytes) + specPbETag := computeETag(specPb) + specPbGzETag := computeETag(specPbGz) + + lastModified := time.Now() + + o.rwMutex.Lock() + defer o.rwMutex.Unlock() + + o.specBytes = specBytes + o.specPb = specPb + o.specPbGz = specPbGz + o.specBytesETag = specBytesETag + o.specPbETag = specPbETag + o.specPbGzETag = specPbGzETag + o.lastModified = lastModified + + return nil +} + +func jsonToYAML(j map[string]interface{}) yaml.MapSlice { + if j == nil { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{k, jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + return jsonToYAML(j) + case []interface{}: + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + return j + } + return j +} + +func ToProtoBinary(json map[string]interface{}) ([]byte, error) { + document, err := openapi_v2.NewDocument(jsonToYAML(json), compiler.NewContext("$root", nil)) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +func toGzip(data []byte) []byte { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + zw.Write(data) + zw.Close() + return buf.Bytes() +} + +// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. +// +// Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead. +func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) { + o, err := NewOpenAPIService(spec) + if err != nil { + return nil, err + } + return o, o.RegisterOpenAPIVersionedService(servePath, handler) +} + +// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. 
+func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error { + accepted := []struct { + Type string + SubType string + GetDataAndETag func() ([]byte, string, time.Time) + }{ + {"application", "json", o.getSwaggerBytes}, + {"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes}, + } + + handler.Handle(servePath, gziphandler.GzipHandler(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + decipherableFormats := r.Header.Get("Accept") + if decipherableFormats == "" { + decipherableFormats = "*/*" + } + clauses := goautoneg.ParseAccept(decipherableFormats) + w.Header().Add("Vary", "Accept") + for _, clause := range clauses { + for _, accepts := range accepted { + if clause.Type != accepts.Type && clause.Type != "*" { + continue + } + if clause.SubType != accepts.SubType && clause.SubType != "*" { + continue + } + + // serve the first matching media type in the sorted clause list + data, etag, lastModified := accepts.GetDataAndETag() + w.Header().Set("Etag", etag) + // ServeContent will take care of caching using eTag. + http.ServeContent(w, r, servePath, lastModified, bytes.NewReader(data)) + return + } + } + // Return 406 for not acceptable format + w.WriteHeader(406) + return + }), + )) + + return nil +} + +// BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it. +// Use this method if your OpenAPI spec is static. If you want to update the spec, use BuildOpenAPISpec then RegisterOpenAPIVersionedService. +func BuildAndRegisterOpenAPIVersionedService(servePath string, webServices []*restful.WebService, config *common.Config, handler common.PathHandler) (*OpenAPIService, error) { + spec, err := builder.BuildOpenAPISpec(webServices, config) + if err != nil { + return nil, err + } + o, err := NewOpenAPIService(spec) + if err != nil { + return nil, err + } + return o, o.RegisterOpenAPIVersionedService(servePath, handler) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go new file mode 100644 index 000000000..adc76f6c0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -0,0 +1,452 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + input: models, + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + if err := c.convertAll(); err != nil { + return nil, err + } + c.addCommonTypes() + return c.output, nil +} + +type convert struct { + input proto.Models + preserveUnknownFields bool + output *schema.Schema + + currentName string + current *schema.Atom + errorMessages []string +} + +func (c *convert) push(name string, a *schema.Atom) *convert { + return &convert{ + input: c.input, + preserveUnknownFields: c.preserveUnknownFields, + output: c.output, + currentName: name, + current: a, + } +} + +func (c *convert) top() *schema.Atom { return c.current } + +func (c *convert) pop(c2 *convert) { + c.errorMessages = append(c.errorMessages, c2.errorMessages...) +} + +func (c *convert) convertAll() error { + for _, name := range c.input.ListModels() { + model := c.input.LookupModel(name) + c.insertTypeDef(name, model) + } + if len(c.errorMessages) > 0 { + return errors.New(strings.Join(c.errorMessages, "\n")) + } + return nil +} + +func (c *convert) reportError(format string, args ...interface{}) { + c.errorMessages = append(c.errorMessages, + c.currentName+": "+fmt.Sprintf(format, args...), + ) +} + +func (c *convert) insertTypeDef(name string, model proto.Schema) { + def := schema.TypeDef{ + Name: name, + } + c2 := c.push(name, &def.Atom) + model.Accept(c2) + c.pop(c2) + if def.Atom == (schema.Atom{}) { + // This could happen if there were a top-level reference. + return + } + c.output.Types = append(c.output.Types, def) +} + +func (c *convert) addCommonTypes() { + c.output.Types = append(c.output.Types, untypedDef) + c.output.Types = append(c.output.Types, deducedDef) +} + +var untypedName string = "__untyped_atomic_" + +var untypedDef schema.TypeDef = schema.TypeDef{ + Name: untypedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + }, +} + +var deducedName string = "__untyped_deduced_" + +var deducedDef schema.TypeDef = schema.TypeDef{ + Name: deducedName, + Atom: schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: &schema.List{ + ElementType: schema.TypeRef{ + NamedType: &untypedName, + }, + ElementRelationship: schema.Atomic, + }, + Map: &schema.Map{ + ElementType: schema.TypeRef{ + NamedType: &deducedName, + }, + ElementRelationship: schema.Separable, + }, + }, +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? 
+ tr.NamedType = &untypedName + } + } + return tr +} + +func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { + schemaUnions := []schema.Union{} + if iunions, ok := extensions["x-kubernetes-unions"]; ok { + unions, ok := iunions.([]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" should be a list, got %#v`, unions) + } + for _, iunion := range unions { + union, ok := iunion.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" items should be a map of string to unions, got %#v`, iunion) + } + unionMap := map[string]interface{}{} + for k, v := range union { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf(`"x-kubernetes-unions" has non-string key: %#v`, k) + } + unionMap[key] = v + } + schemaUnion, err := makeUnion(unionMap) + if err != nil { + return nil, err + } + schemaUnions = append(schemaUnions, schemaUnion) + } + } + + // Make sure we have no overlap between unions + fs := map[string]struct{}{} + for _, u := range schemaUnions { + if u.Discriminator != nil { + if _, ok := fs[*u.Discriminator]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", *u.Discriminator) + } + fs[*u.Discriminator] = struct{}{} + } + for _, f := range u.Fields { + if _, ok := fs[f.FieldName]; ok { + return nil, fmt.Errorf("%v field appears multiple times in unions", f.FieldName) + } + fs[f.FieldName] = struct{}{} + } + } + + return schemaUnions, nil +} + +func makeUnion(extensions map[string]interface{}) (schema.Union, error) { + union := schema.Union{ + Fields: []schema.UnionField{}, + } + + if idiscriminator, ok := extensions["discriminator"]; ok { + discriminator, ok := idiscriminator.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"discriminator" must be a string, got: %#v`, idiscriminator) + } + union.Discriminator = &discriminator + } + + if ifields, ok := extensions["fields-to-discriminateBy"]; ok { + fields, ok := ifields.(map[interface{}]interface{}) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy" must be a map[string]string, got: %#v`, ifields) + } + // Needs sorted keys by field. + keys := []string{} + for ifield := range fields { + field, ok := ifield.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy": field must be a string, got: %#v`, ifield) + } + keys = append(keys, field) + + } + sort.Strings(keys) + reverseMap := map[string]struct{}{} + for _, field := range keys { + value := fields[field] + discriminated, ok := value.(string) + if !ok { + return schema.Union{}, fmt.Errorf(`"fields-to-discriminateBy"/%v: value must be a string, got: %#v`, field, value) + } + union.Fields = append(union.Fields, schema.UnionField{ + FieldName: field, + DiscriminatorValue: discriminated, + }) + + // Check that we don't have the same discriminateBy multiple times. 
+ if _, ok := reverseMap[discriminated]; ok { + return schema.Union{}, fmt.Errorf("Multiple fields have the same discriminated name: %v", discriminated) + } + reverseMap[discriminated] = struct{}{} + } + } + + if union.Discriminator != nil && len(union.Fields) == 0 { + return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) + } + return union, nil +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. + a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + ext := k.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func toStringSlice(o interface{}) (out []string, ok bool) { + switch t := o.(type) { + case []interface{}: + for _, v := range t { + switch vt := v.(type) { + case string: + out = append(out, vt) + } + } + return out, true + } + return nil, false +} + +func (c *convert) VisitArray(a *proto.Array) { + atom := c.top() + atom.List = &schema.List{ + ElementRelationship: schema.Atomic, + } + l := atom.List + l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) + + ext := a.GetExtensions() + + if val, ok := ext["x-kubernetes-list-type"]; ok { + if val == "atomic" { + l.ElementRelationship = schema.Atomic + } else if val == "set" { + l.ElementRelationship = schema.Associative + } else if val == "map" { + l.ElementRelationship = schema.Associative + if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { + if keyNames, ok := toStringSlice(keys); ok { + l.Keys = keyNames + } else { + c.reportError("uninterpreted map keys: %#v", keys) + } + } else { + c.reportError("missing map keys") + } + } else { + c.reportError("unknown list type %v", val) + l.ElementRelationship = schema.Atomic + } + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + if val == "merge" || val == "merge,retainKeys" { + l.ElementRelationship = schema.Associative + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + if keyName, ok := key.(string); ok { + l.Keys = []string{keyName} + } else { + c.reportError("uninterpreted merge key: %#v", key) + } + } else { + // It's not an error for this to be absent, it + // means it's a set. 
+ } + } else if val == "retainKeys" { + } else { + c.reportError("unknown patch strategy %v", val) + l.ElementRelationship = schema.Atomic + } + } +} + +func (c *convert) VisitMap(m *proto.Map) { + a := c.top() + a.Map = &schema.Map{} + a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + + ext := m.GetExtensions() + if val, ok := ext["x-kubernetes-map-type"]; ok { + switch val { + case "atomic": + a.Map.ElementRelationship = schema.Atomic + case "granular": + a.Map.ElementRelationship = schema.Separable + default: + c.reportError("unknown map type %v", val) + } + } +} + +func ptr(s schema.Scalar) *schema.Scalar { return &s } + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + switch p.Type { + case proto.Integer: + a.Scalar = ptr(schema.Numeric) + case proto.Number: + a.Scalar = ptr(schema.Numeric) + case proto.String: + switch p.Format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. + a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } + case proto.Boolean: + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = untypedDef.Atom + if c.preserveUnknownFields { + *c.top() = deducedDef.Atom + } +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 6df0df389..4abcf9b82 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -109,19 +109,39 @@ func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, if _, ok := d.models[reference]; !ok { return nil, newSchemaError(path, "unknown model in reference: %q", reference) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Ref{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, reference: reference, definitions: d, }, nil } -func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { +func parseDefault(def *openapi_v2.Any) (interface{}, error) { + if def == nil { + return nil, nil + } + var i interface{} + if err := yaml.Unmarshal([]byte(def.Yaml), &i); err != nil { + return nil, err + } + return i, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) (BaseSchema, error) { + def, err := parseDefault(s.GetDefault()) + if err != nil { + return BaseSchema{}, err + } return BaseSchema{ Description: s.GetDescription(), + Default: def, Extensions: VendorExtensionToMap(s.GetVendorExtension()), Path: *path, - } + }, nil } // We believe the schema is a map, verify and return a new schema @@ -132,8 +152,12 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) var sub Schema // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. 
if s.GetAdditionalProperties().GetSchema() == nil { + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } sub = &Arbitrary{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, } } else { var err error @@ -142,8 +166,12 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) return nil, err } } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Map{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, SubType: sub, }, nil } @@ -165,8 +193,12 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, default: return nil, newSchemaError(path, "Unknown primitive type: %q", t) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Primitive{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, Type: t, Format: s.GetFormat(), }, nil @@ -188,8 +220,12 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro if err != nil { return nil, err } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Array{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, SubType: sub, }, nil } @@ -216,8 +252,12 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error fieldOrder = append(fieldOrder, name) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Kind{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, RequiredFields: s.GetRequired(), Fields: fields, FieldOrder: fieldOrder, @@ -225,8 +265,12 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Arbitrary{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, }, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 46643aa50..f31a2de2c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -77,6 +77,8 @@ type Schema interface { GetPath() *Path // Describes the field. GetDescription() string + // Default for that schema. + GetDefault() interface{} // Returns type extensions. GetExtensions() map[string]interface{} } @@ -129,6 +131,7 @@ func (p *Path) FieldPath(field string) Path { type BaseSchema struct { Description string Extensions map[string]interface{} + Default interface{} Path Path } @@ -141,6 +144,10 @@ func (b *BaseSchema) GetExtensions() map[string]interface{} { return b.Extensions } +func (b *BaseSchema) GetDefault() interface{} { + return b.Default +} + func (b *BaseSchema) GetPath() *Path { return &b.Path } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/vendor/k8s.io/kube-openapi/pkg/util/trie.go new file mode 100644 index 000000000..a9a76c179 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/trie.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// A simple trie implementation with Add and HasPrefix methods only. +type Trie struct { + children map[byte]*Trie + wordTail bool + word string +} + +// NewTrie creates a Trie and add all strings in the provided list to it. +func NewTrie(list []string) Trie { + ret := Trie{ + children: make(map[byte]*Trie), + wordTail: false, + } + for _, v := range list { + ret.Add(v) + } + return ret +} + +// Add adds a word to this trie +func (t *Trie) Add(v string) { + root := t + for _, b := range []byte(v) { + child, exists := root.children[b] + if !exists { + child = &Trie{ + children: make(map[byte]*Trie), + wordTail: false, + } + root.children[b] = child + } + root = child + } + root.wordTail = true + root.word = v +} + +// HasPrefix returns true of v has any of the prefixes stored in this trie. +func (t *Trie) HasPrefix(v string) bool { + _, has := t.GetPrefix(v) + return has +} + +// GetPrefix is like HasPrefix but return the prefix in case of match or empty string otherwise. +func (t *Trie) GetPrefix(v string) (string, bool) { + root := t + if root.wordTail { + return root.word, true + } + for _, b := range []byte(v) { + child, exists := root.children[b] + if !exists { + return "", false + } + if child.wordTail { + return child.word, true + } + root = child + } + return "", false +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go new file mode 100644 index 000000000..1eb674eea --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "reflect" + "strings" +) + +// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name. +// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName +// instead +// +// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name", +// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined +// in GetCanonicalTypeName means Go type names with full package path. 
+// +// Examples of REST friendly OpenAPI name: +// Input: k8s.io/api/core/v1.Pod +// Output: io.k8s.api.core.v1.Pod +// +// Input: k8s.io/api/core/v1 +// Output: io.k8s.api.core.v1 +// +// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo +func ToCanonicalName(name string) string { + return ToRESTFriendlyName(name) +} + +// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name. +// +// Examples of REST friendly OpenAPI name: +// Input: k8s.io/api/core/v1.Pod +// Output: io.k8s.api.core.v1.Pod +// +// Input: k8s.io/api/core/v1 +// Output: io.k8s.api.core.v1 +// +// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo +func ToRESTFriendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +// OpenAPICanonicalTypeNamer is an interface for models without Go type to seed model name. +// +// OpenAPI canonical names are Go type names with full package path, for uniquely indentifying +// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/" +// should be used. For custom resource definition (CRD), the canonical name is expected to be +// group/version.kind +// +// Examples of canonical name: +// Go type: k8s.io/kubernetes/pkg/apis/core.Pod +// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// +// Example for vendored Go type: +// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod +type OpenAPICanonicalTypeNamer interface { + OpenAPICanonicalTypeName() string +} + +// GetCanonicalTypeName will find the canonical type name of a sample object, removing +// the "vendor" part of the path +func GetCanonicalTypeName(model interface{}) string { + if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { + return namer.OpenAPICanonicalTypeName() + } + t := reflect.TypeOf(model) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.PkgPath() == "" { + return t.Name() + } + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return path + "." + t.Name() +} diff --git a/vendor/k8s.io/utils/net/ipnet.go b/vendor/k8s.io/utils/net/ipnet.go new file mode 100644 index 000000000..c2e844bf5 --- /dev/null +++ b/vendor/k8s.io/utils/net/ipnet.go @@ -0,0 +1,221 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strings" +) + +// IPNetSet maps string to net.IPNet. +type IPNetSet map[string]*net.IPNet + +// ParseIPNets parses string slice to IPNetSet. 
+func ParseIPNets(specs ...string) (IPNetSet, error) { + ipnetset := make(IPNetSet) + for _, spec := range specs { + spec = strings.TrimSpace(spec) + _, ipnet, err := net.ParseCIDR(spec) + if err != nil { + return nil, err + } + k := ipnet.String() // In case of normalization + ipnetset[k] = ipnet + } + return ipnetset, nil +} + +// Insert adds items to the set. +func (s IPNetSet) Insert(items ...*net.IPNet) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPNetSet) Delete(items ...*net.IPNet) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPNetSet) Has(item *net.IPNet) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPNetSet) HasAll(items ...*net.IPNet) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPNetSet) Difference(s2 IPNetSet) IPNetSet { + result := make(IPNetSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPNetSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPNetSet) IsSuperset(s2 IPNetSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPNetSet) Equal(s2 IPNetSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPNetSet) Len() int { + return len(s) +} + +// IPSet maps string to net.IP +type IPSet map[string]net.IP + +// ParseIPSet parses string slice to IPSet +func ParseIPSet(items ...string) (IPSet, error) { + ipset := make(IPSet) + for _, item := range items { + ip := net.ParseIP(strings.TrimSpace(item)) + if ip == nil { + return nil, fmt.Errorf("error parsing IP %q", item) + } + + ipset[ip.String()] = ip + } + + return ipset, nil +} + +// Insert adds items to the set. +func (s IPSet) Insert(items ...net.IP) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPSet) Delete(items ...net.IP) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPSet) Has(item net.IP) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. 
+func (s IPSet) HasAll(items ...net.IP) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPSet) Difference(s2 IPSet) IPSet { + result := make(IPSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPSet) IsSuperset(s2 IPSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPSet) Equal(s2 IPSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPSet) Len() int { + return len(s) +} diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go new file mode 100644 index 000000000..077e44727 --- /dev/null +++ b/vendor/k8s.io/utils/net/net.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "errors" + "fmt" + "math" + "math/big" + "net" + "strconv" +) + +// ParseCIDRs parses a list of cidrs and return error if any is invalid. 
+// order is maintained +func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { + cidrs := make([]*net.IPNet, 0, len(cidrsString)) + for _, cidrString := range cidrsString { + _, cidr, err := net.ParseCIDR(cidrString) + if err != nil { + return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) + } + cidrs = append(cidrs, cidr) + } + return cidrs, nil +} + +// IsDualStackIPs returns if a slice of ips is: +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPs(ips []net.IP) (bool, error) { + v4Found := false + v6Found := false + for _, ip := range ips { + if ip == nil { + return false, fmt.Errorf("ip %v is invalid", ip) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(ip) { + v6Found = true + continue + } + + v4Found = true + } + + return (v4Found && v6Found), nil +} + +// IsDualStackIPStrings returns if +// - all are valid ips +// - at least one ip from each family (v4 or v6) +func IsDualStackIPStrings(ips []string) (bool, error) { + parsedIPs := make([]net.IP, 0, len(ips)) + for _, ip := range ips { + parsedIP := net.ParseIP(ip) + parsedIPs = append(parsedIPs, parsedIP) + } + return IsDualStackIPs(parsedIPs) +} + +// IsDualStackCIDRs returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for _, cidr := range cidrs { + if cidr == nil { + return false, fmt.Errorf("cidr %v is invalid", cidr) + } + + if v4Found && v6Found { + continue + } + + if IsIPv6(cidr.IP) { + v6Found = true + continue + } + v4Found = true + } + + return v4Found && v6Found, nil +} + +// IsDualStackCIDRStrings returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func IsDualStackCIDRStrings(cidrs []string) (bool, error) { + parsedCIDRs, err := ParseCIDRs(cidrs) + if err != nil { + return false, err + } + return IsDualStackCIDRs(parsedCIDRs) +} + +// IsIPv6 returns if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv6String returns if ip is IPv6. +func IsIPv6String(ip string) bool { + netIP := net.ParseIP(ip) + return IsIPv6(netIP) +} + +// IsIPv6CIDRString returns if cidr is IPv6. +// This assumes cidr is a valid CIDR. +func IsIPv6CIDRString(cidr string) bool { + ip, _, _ := net.ParseCIDR(cidr) + return IsIPv6(ip) +} + +// IsIPv6CIDR returns if a cidr is ipv6 +func IsIPv6CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv6(ip) +} + +// IsIPv4 returns if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// IsIPv4String returns if ip is IPv4. +func IsIPv4String(ip string) bool { + netIP := net.ParseIP(ip) + return IsIPv4(netIP) +} + +// IsIPv4CIDR returns if a cidr is ipv4 +func IsIPv4CIDR(cidr *net.IPNet) bool { + ip := cidr.IP + return IsIPv4(ip) +} + +// IsIPv4CIDRString returns if cidr is IPv4. +// This assumes cidr is a valid CIDR. +func IsIPv4CIDRString(cidr string) bool { + ip, _, _ := net.ParseCIDR(cidr) + return IsIPv4(ip) +} + +// ParsePort parses a string representing an IP port. If the string is not a +// valid port number, this returns an error. 
+func ParsePort(port string, allowZero bool) (int, error) { + portInt, err := strconv.ParseUint(port, 10, 16) + if err != nil { + return 0, err + } + if portInt == 0 && !allowZero { + return 0, errors.New("0 is not a valid port number") + } + return int(portInt), nil +} + +// BigForIP creates a big.Int based on the provided net.IP +func BigForIP(ip net.IP) *big.Int { + // NOTE: Convert to 16-byte representation so we can + // handle v4 and v6 values the same way. + return big.NewInt(0).SetBytes(ip.To16()) +} + +// AddIPOffset adds the provided integer offset to a base big.Int representing a net.IP +// NOTE: If you started with a v4 address and overflow it, you get a v6 result. +func AddIPOffset(base *big.Int, offset int) net.IP { + r := big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes() + r = append(make([]byte, 16), r...) + return net.IP(r[len(r)-16:]) +} + +// RangeSize returns the size of a range in valid addresses. +// returns the size of the subnet (or math.MaxInt64 if the range size would overflow int64) +func RangeSize(subnet *net.IPNet) int64 { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // this checks that we are not overflowing an int64 + if bits-ones >= 63 { + return math.MaxInt64 + } + return int64(1) << uint(bits-ones) +} + +// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. +func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { + ip := AddIPOffset(BigForIP(subnet.IP), index) + if !subnet.Contains(ip) { + return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet) + } + return ip, nil +} diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go new file mode 100644 index 000000000..b4ff128e0 --- /dev/null +++ b/vendor/k8s.io/utils/net/port.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// IPFamily refers to a specific family if not empty, i.e. "4" or "6". +type IPFamily string + +// Constants for valid IPFamilys: +const ( + IPv4 IPFamily = "4" + IPv6 = "6" +) + +// Protocol is a network protocol support by LocalPort. +type Protocol string + +// Constants for valid protocols: +const ( + TCP Protocol = "TCP" + UDP Protocol = "UDP" +) + +// LocalPort represents an IP address and port pair along with a protocol +// and potentially a specific IP family. +// A LocalPort can be opened and subsequently closed. +type LocalPort struct { + // Description is an arbitrary string. + Description string + // IP is the IP address part of a given local port. + // If this string is empty, the port binds to all local IP addresses. + IP string + // If IPFamily is not empty, the port binds only to addresses of this + // family. + // IF empty along with IP, bind to local addresses of any family. + IPFamily IPFamily + // Port is the port number. 
+ // A value of 0 causes a port to be automatically chosen. + Port int + // Protocol is the protocol, e.g. TCP + Protocol Protocol +} + +// NewLocalPort returns a LocalPort instance and ensures IPFamily and IP are +// consistent and that the given protocol is valid. +func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protocol) (*LocalPort, error) { + if protocol != TCP && protocol != UDP { + return nil, fmt.Errorf("Unsupported protocol %s", protocol) + } + if ipFamily != "" && ipFamily != "4" && ipFamily != "6" { + return nil, fmt.Errorf("Invalid IP family %s", ipFamily) + } + if ip != "" { + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + return nil, fmt.Errorf("invalid ip address %s", ip) + } + asIPv4 := parsedIP.To4() + if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 { + return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + } + } + return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil +} + +func (lp *LocalPort) String() string { + ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + return fmt.Sprintf("%q (%s/%s%s)", lp.Description, ipPort, strings.ToLower(string(lp.Protocol)), lp.IPFamily) +} + +// Closeable closes an opened LocalPort. +type Closeable interface { + Close() error +} + +// PortOpener can open a LocalPort and allows later closing it. +type PortOpener interface { + OpenLocalPort(lp *LocalPort) (Closeable, error) +} + +type listenPortOpener struct{} + +// ListenPortOpener opens ports by calling bind() and listen(). +var ListenPortOpener listenPortOpener + +// OpenLocalPort holds the given local port open. +func (l *listenPortOpener) OpenLocalPort(lp *LocalPort) (Closeable, error) { + return openLocalPort(lp) +} + +func openLocalPort(lp *LocalPort) (Closeable, error) { + var socket Closeable + hostPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + switch lp.Protocol { + case TCP: + network := "tcp" + string(lp.IPFamily) + listener, err := net.Listen(network, hostPort) + if err != nil { + return nil, err + } + socket = listener + case UDP: + network := "udp" + string(lp.IPFamily) + addr, err := net.ResolveUDPAddr(network, hostPort) + if err != nil { + return nil, err + } + conn, err := net.ListenUDP(network, addr) + if err != nil { + return nil, err + } + socket = conn + default: + return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) + } + return socket, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 35af4dcee..751880d05 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,22 +1,34 @@ # github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e +## explicit github.com/MakeNowJust/heredoc +# github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 +github.com/NYTimes/gziphandler # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/PuerkitoBio/urlesc # github.com/bcicen/go-haproxy v0.0.0-20180203142132-ff5824fe38be +## explicit github.com/bcicen/go-haproxy github.com/bcicen/go-haproxy/kvcodec # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile -# github.com/blang/semver v3.5.0+incompatible +# github.com/blang/semver v3.5.1+incompatible github.com/blang/semver # github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2 github.com/certifi/gocertifi # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 # github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 +## 
explicit github.com/cockroachdb/cmux +# github.com/coreos/go-semver v0.3.0 +github.com/coreos/go-semver/semver +# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e +github.com/coreos/go-systemd/daemon +github.com/coreos/go-systemd/journal +# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f +github.com/coreos/pkg/capnslog # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/emicklei/go-restful v2.9.5+incompatible @@ -25,12 +37,16 @@ github.com/emicklei/go-restful/log # github.com/evanphx/json-patch v4.9.0+incompatible github.com/evanphx/json-patch # github.com/fortytw2/leaktest v1.3.0 +## explicit github.com/fortytw2/leaktest # github.com/fsnotify/fsnotify v1.4.9 +## explicit github.com/fsnotify/fsnotify # github.com/getsentry/raven-go v0.2.0 +## explicit github.com/getsentry/raven-go # github.com/go-logr/logr v0.2.1 +## explicit github.com/go-logr/logr # github.com/go-openapi/jsonpointer v0.19.3 github.com/go-openapi/jsonpointer @@ -41,17 +57,23 @@ github.com/go-openapi/spec # github.com/go-openapi/swag v0.19.5 github.com/go-openapi/swag # github.com/gocarina/gocsv v0.0.0-20190927101021-3ecffd272576 +## explicit github.com/gocarina/gocsv # github.com/gogo/protobuf v1.3.1 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys -# github.com/golang/protobuf v1.4.2 +# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +github.com/golang/groupcache/lru +# github.com/golang/protobuf v1.4.3 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.4.0 +# github.com/google/go-cmp v0.5.2 +## explicit github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -59,12 +81,14 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz -# github.com/google/uuid v1.1.1 +# github.com/google/uuid v1.1.2 github.com/google/uuid # github.com/googleapis/gnostic v0.4.1 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 +# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 +github.com/grpc-ecosystem/go-grpc-prometheus # github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -86,11 +110,15 @@ github.com/matttproud/golang_protobuf_extensions/pbutil github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +github.com/munnerz/goautoneg # github.com/openshift/api v0.0.0-20200827090112-c05698d102cf +## explicit github.com/openshift/api/project/v1 github.com/openshift/api/route github.com/openshift/api/route/v1 # github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5 +## explicit github.com/openshift/client-go/project/clientset/versioned github.com/openshift/client-go/project/clientset/versioned/fake github.com/openshift/client-go/project/clientset/versioned/scheme @@ -103,6 +131,7 @@ github.com/openshift/client-go/route/clientset/versioned/typed/route/v1 github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake github.com/openshift/client-go/route/listers/route/v1 # 
github.com/openshift/library-go v0.0.0-20200921120329-c803a7b7bb2c +## explicit github.com/openshift/library-go/pkg/crypto github.com/openshift/library-go/pkg/proc github.com/openshift/library-go/pkg/serviceability @@ -111,28 +140,75 @@ github.com/pkg/errors # github.com/pkg/profile v1.3.0 github.com/pkg/profile # github.com/prometheus/client_golang v1.7.1 +## explicit github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/testutil +github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.2.0 +## explicit github.com/prometheus/client_model/go # github.com/prometheus/common v0.10.0 +## explicit github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.1.3 +# github.com/prometheus/procfs v0.2.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/sirupsen/logrus v1.6.0 github.com/sirupsen/logrus # github.com/spf13/cobra v1.0.0 +## explicit github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 +## explicit github.com/spf13/pflag -# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 +# go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 +go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/clientv3 +go.etcd.io/etcd/clientv3/balancer +go.etcd.io/etcd/clientv3/balancer/connectivity +go.etcd.io/etcd/clientv3/balancer/picker +go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/credentials +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes +go.etcd.io/etcd/etcdserver/etcdserverpb +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/raft +go.etcd.io/etcd/raft/confchange +go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/raft/tracker +go.etcd.io/etcd/version +# go.uber.org/atomic v1.4.0 +go.uber.org/atomic +# go.uber.org/multierr v1.1.0 +go.uber.org/multierr +# go.uber.org/zap v1.10.0 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore +# golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/internal/subtle +golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/poly1305 +golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20200707034311-ab3426394381 +# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts @@ -142,22 +218,23 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 +# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/singleflight -# golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 +# golang.org/x/sys v0.0.0-20201112073958-5cba982894dd +golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix 
golang.org/x/sys/windows -# golang.org/x/text v0.3.3 +# golang.org/x/text v0.3.4 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20191024005414-555d28b269f0 +# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e golang.org/x/time/rate # google.golang.org/appengine v1.6.5 google.golang.org/appengine/internal @@ -167,9 +244,9 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 +# google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.27.0 +# google.golang.org/grpc v1.27.1 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -202,11 +279,13 @@ google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.24.0 +# google.golang.org/protobuf v1.25.0 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt @@ -217,12 +296,11 @@ google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/fieldnum google.golang.org/protobuf/internal/fieldsort google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags -google.golang.org/protobuf/internal/genname +google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/mapsort google.golang.org/protobuf/internal/pragma @@ -239,11 +317,17 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 +# gopkg.in/natefinch/lumberjack.v2 v2.0.0 +gopkg.in/natefinch/lumberjack.v2 # gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v2 -# k8s.io/api v0.19.2 +# k8s.io/api v0.20.0 +## explicit +k8s.io/api/admission/v1 +k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 @@ -268,8 +352,10 @@ k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 k8s.io/api/policy/v1beta1 @@ -279,22 +365,25 @@ k8s.io/api/rbac/v1beta1 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 -k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.19.2 +# k8s.io/apimachinery v0.20.0 +## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource 
+k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -323,25 +412,53 @@ k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/waitgroup k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.19.2 +# k8s.io/apiserver v0.20.0 +## explicit +k8s.io/apiserver/pkg/admission +k8s.io/apiserver/pkg/admission/configuration +k8s.io/apiserver/pkg/admission/initializer +k8s.io/apiserver/pkg/admission/metrics +k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle +k8s.io/apiserver/pkg/admission/plugin/webhook +k8s.io/apiserver/pkg/admission/plugin/webhook/config +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1 +k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 +k8s.io/apiserver/pkg/admission/plugin/webhook/errors +k8s.io/apiserver/pkg/admission/plugin/webhook/generic +k8s.io/apiserver/pkg/admission/plugin/webhook/mutating +k8s.io/apiserver/pkg/admission/plugin/webhook/namespace +k8s.io/apiserver/pkg/admission/plugin/webhook/object +k8s.io/apiserver/pkg/admission/plugin/webhook/request +k8s.io/apiserver/pkg/admission/plugin/webhook/rules +k8s.io/apiserver/pkg/admission/plugin/webhook/validating k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install k8s.io/apiserver/pkg/apis/apiserver/v1 k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 k8s.io/apiserver/pkg/apis/apiserver/v1beta1 k8s.io/apiserver/pkg/apis/audit +k8s.io/apiserver/pkg/apis/audit/install k8s.io/apiserver/pkg/apis/audit/v1 k8s.io/apiserver/pkg/apis/audit/v1alpha1 k8s.io/apiserver/pkg/apis/audit/v1beta1 +k8s.io/apiserver/pkg/apis/audit/validation +k8s.io/apiserver/pkg/apis/config +k8s.io/apiserver/pkg/apis/config/v1 +k8s.io/apiserver/pkg/apis/config/validation +k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap k8s.io/apiserver/pkg/audit +k8s.io/apiserver/pkg/audit/policy k8s.io/apiserver/pkg/authentication/authenticator k8s.io/apiserver/pkg/authentication/authenticatorfactory k8s.io/apiserver/pkg/authentication/group @@ -351,30 +468,95 @@ k8s.io/apiserver/pkg/authentication/request/headerrequest k8s.io/apiserver/pkg/authentication/request/union k8s.io/apiserver/pkg/authentication/request/websocket k8s.io/apiserver/pkg/authentication/request/x509 +k8s.io/apiserver/pkg/authentication/serviceaccount k8s.io/apiserver/pkg/authentication/token/cache k8s.io/apiserver/pkg/authentication/token/tokenfile k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/authorization/authorizer 
k8s.io/apiserver/pkg/authorization/authorizerfactory +k8s.io/apiserver/pkg/authorization/path +k8s.io/apiserver/pkg/authorization/union +k8s.io/apiserver/pkg/endpoints +k8s.io/apiserver/pkg/endpoints/deprecation +k8s.io/apiserver/pkg/endpoints/discovery +k8s.io/apiserver/pkg/endpoints/filterlatency +k8s.io/apiserver/pkg/endpoints/filters +k8s.io/apiserver/pkg/endpoints/handlers +k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager +k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal +k8s.io/apiserver/pkg/endpoints/handlers/negotiation +k8s.io/apiserver/pkg/endpoints/handlers/responsewriters k8s.io/apiserver/pkg/endpoints/metrics +k8s.io/apiserver/pkg/endpoints/openapi k8s.io/apiserver/pkg/endpoints/request +k8s.io/apiserver/pkg/endpoints/warning k8s.io/apiserver/pkg/features +k8s.io/apiserver/pkg/quota/v1 +k8s.io/apiserver/pkg/registry/generic +k8s.io/apiserver/pkg/registry/generic/registry +k8s.io/apiserver/pkg/registry/rest +k8s.io/apiserver/pkg/server +k8s.io/apiserver/pkg/server/dynamiccertificates k8s.io/apiserver/pkg/server/egressselector k8s.io/apiserver/pkg/server/egressselector/metrics +k8s.io/apiserver/pkg/server/filters k8s.io/apiserver/pkg/server/healthz k8s.io/apiserver/pkg/server/httplog +k8s.io/apiserver/pkg/server/mux +k8s.io/apiserver/pkg/server/options +k8s.io/apiserver/pkg/server/options/encryptionconfig +k8s.io/apiserver/pkg/server/resourceconfig +k8s.io/apiserver/pkg/server/routes +k8s.io/apiserver/pkg/server/storage +k8s.io/apiserver/pkg/storage +k8s.io/apiserver/pkg/storage/cacher +k8s.io/apiserver/pkg/storage/errors +k8s.io/apiserver/pkg/storage/etcd3 +k8s.io/apiserver/pkg/storage/etcd3/metrics +k8s.io/apiserver/pkg/storage/names +k8s.io/apiserver/pkg/storage/storagebackend +k8s.io/apiserver/pkg/storage/storagebackend/factory +k8s.io/apiserver/pkg/storage/value +k8s.io/apiserver/pkg/storage/value/encrypt/aes +k8s.io/apiserver/pkg/storage/value/encrypt/envelope +k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1 +k8s.io/apiserver/pkg/storage/value/encrypt/identity +k8s.io/apiserver/pkg/storage/value/encrypt/secretbox +k8s.io/apiserver/pkg/storageversion +k8s.io/apiserver/pkg/util/apihelpers +k8s.io/apiserver/pkg/util/dryrun k8s.io/apiserver/pkg/util/feature +k8s.io/apiserver/pkg/util/flowcontrol +k8s.io/apiserver/pkg/util/flowcontrol/counter +k8s.io/apiserver/pkg/util/flowcontrol/debug +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset +k8s.io/apiserver/pkg/util/flowcontrol/format +k8s.io/apiserver/pkg/util/flowcontrol/metrics +k8s.io/apiserver/pkg/util/flushwriter +k8s.io/apiserver/pkg/util/openapi +k8s.io/apiserver/pkg/util/shufflesharding k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/wsstream +k8s.io/apiserver/pkg/warning +k8s.io/apiserver/plugin/pkg/audit/buffered +k8s.io/apiserver/plugin/pkg/audit/log +k8s.io/apiserver/plugin/pkg/audit/truncate +k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.19.2 +# k8s.io/client-go v0.20.0 +## explicit k8s.io/client-go/discovery k8s.io/client-go/discovery/fake k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 k8s.io/client-go/informers/admissionregistration/v1beta1 +k8s.io/client-go/informers/apiserverinternal 
+k8s.io/client-go/informers/apiserverinternal/v1alpha1 k8s.io/client-go/informers/apps k8s.io/client-go/informers/apps/v1 k8s.io/client-go/informers/apps/v1beta1 @@ -405,11 +587,13 @@ k8s.io/client-go/informers/extensions k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/flowcontrol k8s.io/client-go/informers/flowcontrol/v1alpha1 +k8s.io/client-go/informers/flowcontrol/v1beta1 k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1 k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/informers/node +k8s.io/client-go/informers/node/v1 k8s.io/client-go/informers/node/v1alpha1 k8s.io/client-go/informers/node/v1beta1 k8s.io/client-go/informers/policy @@ -422,8 +606,6 @@ k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 k8s.io/client-go/informers/scheduling/v1beta1 -k8s.io/client-go/informers/settings -k8s.io/client-go/informers/settings/v1alpha1 k8s.io/client-go/informers/storage k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 @@ -435,6 +617,8 @@ k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake +k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 +k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -483,10 +667,14 @@ k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake +k8s.io/client-go/kubernetes/typed/node/v1 +k8s.io/client-go/kubernetes/typed/node/v1/fake k8s.io/client-go/kubernetes/typed/node/v1alpha1 k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake k8s.io/client-go/kubernetes/typed/node/v1beta1 @@ -505,8 +693,6 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake -k8s.io/client-go/kubernetes/typed/settings/v1alpha1 -k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1 @@ -515,6 +701,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1beta1 +k8s.io/client-go/listers/apiserverinternal/v1alpha1 k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 k8s.io/client-go/listers/apps/v1beta2 @@ -535,8 +722,10 @@ k8s.io/client-go/listers/events/v1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 
k8s.io/client-go/listers/flowcontrol/v1alpha1 +k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/networking/v1 k8s.io/client-go/listers/networking/v1beta1 +k8s.io/client-go/listers/node/v1 k8s.io/client-go/listers/node/v1alpha1 k8s.io/client-go/listers/node/v1beta1 k8s.io/client-go/listers/policy/v1beta1 @@ -546,7 +735,6 @@ k8s.io/client-go/listers/rbac/v1beta1 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 -k8s.io/client-go/listers/settings/v1alpha1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 @@ -565,8 +753,11 @@ k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 +k8s.io/client-go/tools/events k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager +k8s.io/client-go/tools/record +k8s.io/client-go/tools/record/util k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert @@ -575,27 +766,45 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.19.2 +# k8s.io/component-base v0.20.0 +k8s.io/component-base/cli/flag k8s.io/component-base/featuregate +k8s.io/component-base/logs +k8s.io/component-base/logs/datapol +k8s.io/component-base/logs/json +k8s.io/component-base/logs/sanitization k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/metrics/prometheus/workqueue +k8s.io/component-base/metrics/testutil k8s.io/component-base/version # k8s.io/klog v1.0.0 +## explicit k8s.io/klog k8s.io/klog/klogr -# k8s.io/klog/v2 v2.3.0 +# k8s.io/klog/v2 v2.4.0 k8s.io/klog/v2 -# k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 +# k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd +k8s.io/kube-openapi/pkg/builder +k8s.io/kube-openapi/pkg/common +k8s.io/kube-openapi/pkg/handler +k8s.io/kube-openapi/pkg/schemaconv +k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20200729134348-d5654de09c73 +# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 k8s.io/utils/buffer k8s.io/utils/integer +k8s.io/utils/net k8s.io/utils/path k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/structured-merge-diff/v4 v4.0.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.0.2 +sigs.k8s.io/structured-merge-diff/v4/fieldpath +sigs.k8s.io/structured-merge-diff/v4/merge +sigs.k8s.io/structured-merge-diff/v4/schema +sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index 6f5110696..3ccec331a 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -51,6 +51,12 @@ type grpcTunnel struct { connsLock sync.RWMutex } +type clientConn interface { + Close() error +} + +var _ clientConn = &grpc.ClientConn{} + // 
CreateSingleUseGrpcTunnel creates a Tunnel to dial to a remote server through a // gRPC based proxy service. // Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated @@ -79,7 +85,7 @@ func CreateSingleUseGrpcTunnel(address string, opts ...grpc.DialOption) (Tunnel, return tunnel, nil } -func (t *grpcTunnel) serve(c *grpc.ClientConn) { +func (t *grpcTunnel) serve(c clientConn) { defer c.Close() for { @@ -88,11 +94,11 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) { return } if err != nil || pkt == nil { - klog.Warningf("stream read error: %v", err) + klog.ErrorS(err, "stream read failure") return } - klog.V(6).Infof("[tracing] recv packet, type: %s", pkt.Type) + klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type) switch pkt.Type { case client.PacketType_DIAL_RSP: @@ -102,7 +108,7 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) { t.pendingDialLock.RUnlock() if !ok { - klog.Warning("DialResp not recognized; dropped") + klog.V(1).Infoln("DialResp not recognized; dropped") } else { ch <- dialResult{ err: resp.Error, @@ -119,7 +125,7 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) { if ok { conn.readCh <- resp.Data } else { - klog.Warningf("connection id %d not recognized", resp.ConnectID) + klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID) } case client.PacketType_CLOSE_RSP: resp := pkt.GetCloseResponse() @@ -136,7 +142,7 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) { t.connsLock.Unlock() return } - klog.Warningf("connection id %d not recognized", resp.ConnectID) + klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID) } } } @@ -169,14 +175,14 @@ func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) { }, }, } - klog.V(6).Infof("[tracing] send packet, type: %s", req.Type) + klog.V(5).InfoS("[tracing] send packet", "type", req.Type) err := t.stream.Send(req) if err != nil { return nil, err } - klog.Info("DIAL_REQ sent to proxy server") + klog.V(5).Infoln("DIAL_REQ sent to proxy server") c := &conn{stream: t.stream} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go index 5137ba266..4a93c69cf 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go @@ -54,7 +54,7 @@ func (c *conn) Write(data []byte) (n int, err error) { }, } - klog.V(6).Infof("[tracing] send req, type: %s", req.Type) + klog.V(5).InfoS("[tracing] send req", "type", req.Type) err = c.stream.Send(req) if err != nil { @@ -112,7 +112,7 @@ func (c *conn) SetWriteDeadline(t time.Time) error { // Close closes the connection. It also sends CLOSE_REQ packet over // proxy service to notify remote to drop the connection. 
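// A brief sketch of the structured logging style the hunks above migrate to
// (klog/v2): a constant message plus alternating key/value pairs instead of
// printf-style formatting, for example:
//
//	klog.Warningf("stream read error: %v", err)                // old, printf-style
//	klog.ErrorS(err, "stream read failure")                    // new, structured error
//	klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type) // new, structured and verbosity-gated
//
// The lowered verbosity levels (V(6) to V(5), unconditional Info to V(4)/V(5))
// reduce log noise at default settings; err and pkt here refer to the
// variables already used in the surrounding code.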
func (c *conn) Close() error { - klog.Info("conn.Close()") + klog.V(4).Infoln("closing connection") req := &client.Packet{ Type: client.PacketType_CLOSE_REQ, Payload: &client.Packet_CloseRequest{ @@ -122,7 +122,7 @@ func (c *conn) Close() error { }, } - klog.V(6).Infof("[tracing] send req, type: %s", req.Type) + klog.V(5).InfoS("[tracing] send req", "type", req.Type) if err := c.stream.Send(req); err != nil { return err diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go new file mode 100644 index 000000000..f4fbbff26 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath defines a way for referencing path elements (e.g., an +// index in an array, or a key in a map). It provides types for arranging these +// into paths for referencing nested fields, and for grouping those into sets, +// for referencing multiple nested fields. +package fieldpath diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go new file mode 100644 index 000000000..1578f64c0 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go @@ -0,0 +1,317 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// PathElement describes how to select a child field given a containing object. +type PathElement struct { + // Exactly one of the following fields should be non-nil. + + // FieldName selects a single field from a map (reminder: this is also + // how structs are represented). The containing object must be a map. + FieldName *string + + // Key selects the list element which has fields matching those given. + // The containing object must be an associative list with map typed + // elements. They are sorted alphabetically. + Key *value.FieldList + + // Value selects the list element with the given value. The containing + // object must be an associative list with a primitive typed element + // (i.e., a set). + Value *value.Value + + // Index selects a list element by its index number. The containing + // object must be an atomic list. + Index *int +} + +// Less provides an order for path elements. 
+func (e PathElement) Less(rhs PathElement) bool { + return e.Compare(rhs) < 0 +} + +// Compare provides an order for path elements. +func (e PathElement) Compare(rhs PathElement) int { + if e.FieldName != nil { + if rhs.FieldName == nil { + return -1 + } + return strings.Compare(*e.FieldName, *rhs.FieldName) + } else if rhs.FieldName != nil { + return 1 + } + + if e.Key != nil { + if rhs.Key == nil { + return -1 + } + return e.Key.Compare(*rhs.Key) + } else if rhs.Key != nil { + return 1 + } + + if e.Value != nil { + if rhs.Value == nil { + return -1 + } + return value.Compare(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return 1 + } + + if e.Index != nil { + if rhs.Index == nil { + return -1 + } + if *e.Index < *rhs.Index { + return -1 + } else if *e.Index == *rhs.Index { + return 0 + } + return 1 + } else if rhs.Index != nil { + return 1 + } + + return 0 +} + +// Equals returns true if both path elements are equal. +func (e PathElement) Equals(rhs PathElement) bool { + if e.FieldName != nil { + if rhs.FieldName == nil { + return false + } + return *e.FieldName == *rhs.FieldName + } else if rhs.FieldName != nil { + return false + } + if e.Key != nil { + if rhs.Key == nil { + return false + } + return e.Key.Equals(*rhs.Key) + } else if rhs.Key != nil { + return false + } + if e.Value != nil { + if rhs.Value == nil { + return false + } + return value.Equals(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return false + } + if e.Index != nil { + if rhs.Index == nil { + return false + } + return *e.Index == *rhs.Index + } else if rhs.Index != nil { + return false + } + return true +} + +// String presents the path element as a human-readable string. +func (e PathElement) String() string { + switch { + case e.FieldName != nil: + return "." + *e.FieldName + case e.Key != nil: + strs := make([]string, len(*e.Key)) + for i, k := range *e.Key { + strs[i] = fmt.Sprintf("%v=%v", k.Name, value.ToString(k.Value)) + } + // Keys are supposed to be sorted. + return "[" + strings.Join(strs, ",") + "]" + case e.Value != nil: + return fmt.Sprintf("[=%v]", value.ToString(*e.Value)) + case e.Index != nil: + return fmt.Sprintf("[%v]", *e.Index) + default: + return "{{invalid path element}}" + } +} + +// KeyByFields is a helper function which constructs a key for an associative +// list type. `nameValues` must have an even number of entries, alternating +// names (type must be string) with values (type must be value.Value). If these +// conditions are not met, KeyByFields will panic--it's intended for static +// construction and shouldn't have user-produced values passed to it. +func KeyByFields(nameValues ...interface{}) *value.FieldList { + if len(nameValues)%2 != 0 { + panic("must have a value for every name") + } + out := value.FieldList{} + for i := 0; i < len(nameValues)-1; i += 2 { + out = append(out, value.Field{Name: nameValues[i].(string), Value: value.NewValueInterface(nameValues[i+1])}) + } + out.Sort() + return &out +} + +// PathElementSet is a set of path elements. +// TODO: serialize as a list. +type PathElementSet struct { + members sortedPathElements +} + +func MakePathElementSet(size int) PathElementSet { + return PathElementSet{ + members: make(sortedPathElements, 0, size), + } +} + +type sortedPathElements []PathElement + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. 
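// An illustrative sketch of how path elements are constructed and ordered
// (the variable names here are hypothetical, not part of this file). Elements
// with a FieldName sort before Key elements, Key before Value, and Value
// before Index; elements of the same kind compare by their contents.
//
//	name := "containers"
//	byField := PathElement{FieldName: &name}                 // String() yields ".containers"
//	byKey := PathElement{Key: KeyByFields("name", "router")} // selects the list element whose "name" field is "router"
//	i := 0
//	byIndex := PathElement{Index: &i}                        // String() yields "[0]"
//
//	byField.Less(byKey) // true: FieldName orders before Key
//	byKey.Less(byIndex) // true: Key orders before Index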
+func (spe sortedPathElements) Len() int { return len(spe) } +func (spe sortedPathElements) Less(i, j int) bool { return spe[i].Less(spe[j]) } +func (spe sortedPathElements) Swap(i, j int) { spe[i], spe[j] = spe[j], spe[i] } + +// Insert adds pe to the set. +func (s *PathElementSet) Insert(pe PathElement) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pe) + return + } + if s.members[loc].Equals(pe) { + return + } + s.members = append(s.members, PathElement{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pe +} + +// Union returns a set containing elements that appear in either s or s2. +func (s *PathElementSet) Union(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + out.members = append(out.members, s.members[i]) + i++ + } else { + out.members = append(out.members, s2.members[j]) + if !s2.members[j].Less(s.members[i]) { + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) + } + return out +} + +// Intersection returns a set containing elements which appear in both s and s2. +func (s *PathElementSet) Intersection(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + i++ + } else { + if !s2.members[j].Less(s.members[i]) { + out.members = append(out.members, s.members[i]) + i++ + } + j++ + } + } + + return out +} + +// Difference returns a set containing elements which appear in s but not in s2. +func (s *PathElementSet) Difference(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + out.members = append(out.members, s.members[i]) + i++ + } else { + if !s2.members[j].Less(s.members[i]) { + i++ + } + j++ + } + } + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + return out +} + +// Size retuns the number of elements in the set. +func (s *PathElementSet) Size() int { return len(s.members) } + +// Has returns true if pe is a member of the set. +func (s *PathElementSet) Has(pe PathElement) bool { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].Less(pe) + }) + if loc == len(s.members) { + return false + } + if s.members[loc].Equals(pe) { + return true + } + return false +} + +// Equals returns true if s and s2 have exactly the same members. +func (s *PathElementSet) Equals(s2 *PathElementSet) bool { + if len(s.members) != len(s2.members) { + return false + } + for k := range s.members { + if !s.members[k].Equals(s2.members[k]) { + return false + } + } + return true +} + +// Iterate calls f for each PathElement in the set. The order is deterministic. +func (s *PathElementSet) Iterate(f func(PathElement)) { + for _, pe := range s.members { + f(pe) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go new file mode 100644 index 000000000..20775ee02 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// SetFromValue creates a set containing every leaf field mentioned in v. +func SetFromValue(v value.Value) *Set { + s := NewSet() + + w := objectWalker{ + path: Path{}, + value: v, + allocator: value.NewFreelistAllocator(), + do: func(p Path) { s.Insert(p) }, + } + + w.walk() + return s +} + +type objectWalker struct { + path Path + value value.Value + allocator value.Allocator + + do func(Path) +} + +func (w *objectWalker) walk() { + switch { + case w.value.IsNull(): + case w.value.IsFloat(): + case w.value.IsInt(): + case w.value.IsString(): + case w.value.IsBool(): + // All leaf fields handled the same way (after the switch + // statement). + + // Descend + case w.value.IsList(): + // If the list were atomic, we'd break here, but we don't have + // a schema, so we can't tell. + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, value := iter.Item() + w2 := *w + w2.path = append(w.path, w.GuessBestListPathElement(i, value)) + w2.value = value + w2.walk() + } + return + case w.value.IsMap(): + // If the map/struct were atomic, we'd break here, but we don't + // have a schema, so we can't tell. + + m := w.value.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + m.IterateUsing(w.allocator, func(k string, val value.Value) bool { + w2 := *w + w2.path = append(w.path, PathElement{FieldName: &k}) + w2.value = val + w2.walk() + return true + }) + return + } + + // Leaf fields get added to the set. + if len(w.path) > 0 { + w.do(w.path) + } +} + +// AssociativeListCandidateFieldNames lists the field names which are +// considered keys if found in a list element. +var AssociativeListCandidateFieldNames = []string{ + "key", + "id", + "name", +} + +// GuessBestListPathElement guesses whether item is an associative list +// element, which should be referenced by key(s), or if it is not and therefore +// referencing by index is acceptable. Currently this is done by checking +// whether item has any of the fields listed in +// AssociativeListCandidateFieldNames which have scalar values. +func (w *objectWalker) GuessBestListPathElement(index int, item value.Value) PathElement { + if !item.IsMap() { + // Non map items could be parts of sets or regular "atomic" + // lists. We won't try to guess whether something should be a + // set or not. + return PathElement{Index: &index} + } + + m := item.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + var keys value.FieldList + for _, name := range AssociativeListCandidateFieldNames { + f, ok := m.Get(name) + if !ok { + continue + } + // only accept primitive/scalar types as keys. 
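// A hypothetical illustration of this guessing behaviour (the object and the
// paths below are made up). Walking a schemaless value such as
//
//	{"spec": {"containers": [{"name": "router", "image": "x"}], "ports": [80, 443]}}
//
// with SetFromValue produces leaf paths roughly like
//
//	.spec.containers[name="router"].name
//	.spec.containers[name="router"].image
//	.spec.ports[0]
//	.spec.ports[1]
//
// because list items that are maps containing one of the candidate key fields
// ("key", "id", "name") with a scalar value are treated as associative-list
// members, while everything else falls back to positional indexing.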
+ if f.IsNull() || f.IsMap() || f.IsList() { + continue + } + keys = append(keys, value.Field{Name: name, Value: f}) + } + if len(keys) > 0 { + keys.Sort() + return PathElement{Key: &keys} + } + return PathElement{Index: &index} +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go new file mode 100644 index 000000000..20499dc03 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" +) + +// APIVersion describes the version of an object or of a fieldset. +type APIVersion string + +type VersionedSet interface { + Set() *Set + APIVersion() APIVersion + Applied() bool +} + +// VersionedSet associates a version to a set. +type versionedSet struct { + set *Set + apiVersion APIVersion + applied bool +} + +func NewVersionedSet(set *Set, apiVersion APIVersion, applied bool) VersionedSet { + return versionedSet{ + set: set, + apiVersion: apiVersion, + applied: applied, + } +} + +func (v versionedSet) Set() *Set { + return v.set +} + +func (v versionedSet) APIVersion() APIVersion { + return v.apiVersion +} + +func (v versionedSet) Applied() bool { + return v.applied +} + +// ManagedFields is a map from manager to VersionedSet (what they own in +// what version). +type ManagedFields map[string]VersionedSet + +// Equals returns true if the two managedfields are the same, false +// otherwise. +func (lhs ManagedFields) Equals(rhs ManagedFields) bool { + if len(lhs) != len(rhs) { + return false + } + + for manager, left := range lhs { + right, ok := rhs[manager] + if !ok { + return false + } + if left.APIVersion() != right.APIVersion() || left.Applied() != right.Applied() { + return false + } + if !left.Set().Equals(right.Set()) { + return false + } + } + return true +} + +// Copy the list, this is mostly a shallow copy. +func (lhs ManagedFields) Copy() ManagedFields { + copy := ManagedFields{} + for manager, set := range lhs { + copy[manager] = set + } + return copy +} + +// Difference returns a symmetric difference between two Managers. If a +// given user's entry has version X in lhs and version Y in rhs, then +// the return value for that user will be from rhs. If the difference for +// a user is an empty set, that user will not be inserted in the map. +func (lhs ManagedFields) Difference(rhs ManagedFields) ManagedFields { + diff := ManagedFields{} + + for manager, left := range lhs { + right, ok := rhs[manager] + if !ok { + if !left.Set().Empty() { + diff[manager] = left + } + continue + } + + // If we have sets in both but their version + // differs, we don't even diff and keep the + // entire thing. 
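// A hypothetical illustration of the overall Difference semantics (manager
// names, versions and field sets below are made up). Given
//
//	lhs = {"kubectl": fields {a, b} at v1}
//	rhs = {"kubectl": fields {b, c} at v1, "controller": fields {d} at v1}
//
// the result is {"kubectl": fields {a, c} at v1, "controller": fields {d} at v1}:
// the per-manager symmetric difference, plus any manager present on only one
// side, while a manager whose two entries disagree on APIVersion keeps its
// entire rhs entry instead of a field-level diff (the case handled just below).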
+ if left.APIVersion() != right.APIVersion() { + diff[manager] = right + continue + } + + newSet := left.Set().Difference(right.Set()).Union(right.Set().Difference(left.Set())) + if !newSet.Empty() { + diff[manager] = NewVersionedSet(newSet, right.APIVersion(), false) + } + } + + for manager, set := range rhs { + if _, ok := lhs[manager]; ok { + // Already done + continue + } + if !set.Set().Empty() { + diff[manager] = set + } + } + + return diff +} + +func (lhs ManagedFields) String() string { + s := strings.Builder{} + for k, v := range lhs { + fmt.Fprintf(&s, "%s:\n", k) + fmt.Fprintf(&s, "- Applied: %v\n", v.Applied()) + fmt.Fprintf(&s, "- APIVersion: %v\n", v.APIVersion()) + fmt.Fprintf(&s, "- Set: %v\n", v.Set()) + } + return s.String() +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go new file mode 100644 index 000000000..0413130bd --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go @@ -0,0 +1,118 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Path describes how to select a potentially deeply-nested child field given a +// containing object. +type Path []PathElement + +func (fp Path) String() string { + strs := make([]string, len(fp)) + for i := range fp { + strs[i] = fp[i].String() + } + return strings.Join(strs, "") +} + +// Equals returns true if the two paths are equivalent. +func (fp Path) Equals(fp2 Path) bool { + if len(fp) != len(fp2) { + return false + } + for i := range fp { + if !fp[i].Equals(fp2[i]) { + return false + } + } + return true +} + +// Less provides a lexical order for Paths. +func (fp Path) Compare(rhs Path) int { + i := 0 + for { + if i >= len(fp) && i >= len(rhs) { + // Paths are the same length and all items are equal. + return 0 + } + if i >= len(fp) { + // LHS is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := fp[i].Compare(rhs[i]); c != 0 { + return c + } + // The items are equal; continue. + i++ + } +} + +func (fp Path) Copy() Path { + new := make(Path, len(fp)) + copy(new, fp) + return new +} + +// MakePath constructs a Path. The parts may be PathElements, ints, strings. +func MakePath(parts ...interface{}) (Path, error) { + var fp Path + for _, p := range parts { + switch t := p.(type) { + case PathElement: + fp = append(fp, t) + case int: + // TODO: Understand schema and object and convert this to the + // FieldSpecifier below if appropriate. 
+ fp = append(fp, PathElement{Index: &t}) + case string: + fp = append(fp, PathElement{FieldName: &t}) + case *value.FieldList: + if len(*t) == 0 { + return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)") + } + fp = append(fp, PathElement{Key: t}) + case value.Value: + // TODO: understand schema and verify that this is a set type + // TODO: make a copy of t + fp = append(fp, PathElement{Value: &t}) + default: + return nil, fmt.Errorf("unable to make %#v into a path element", p) + } + } + return fp, nil +} + +// MakePathOrDie panics if parts can't be turned into a path. Good for things +// that are known at complie time. +func MakePathOrDie(parts ...interface{}) Path { + fp, err := MakePath(parts...) + if err != nil { + panic(err) + } + return fp +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go new file mode 100644 index 000000000..9b14ca581 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sort" + + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// PathElementValueMap is a map from PathElement to value.Value. +// +// TODO(apelisse): We have multiple very similar implementation of this +// for PathElementSet and SetNodeMap, so we could probably share the +// code. +type PathElementValueMap struct { + members sortedPathElementValues +} + +func MakePathElementValueMap(size int) PathElementValueMap { + return PathElementValueMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +type pathElementValue struct { + PathElement PathElement + Value value.Value +} + +type sortedPathElementValues []pathElementValue + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (spev sortedPathElementValues) Len() int { return len(spev) } +func (spev sortedPathElementValues) Less(i, j int) bool { + return spev[i].PathElement.Less(spev[j].PathElement) +} +func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } + +// Insert adds the pathelement and associated value in the map. +func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pathElementValue{pe, v}) + return + } + if s.members[loc].PathElement.Equals(pe) { + return + } + s.members = append(s.members, pathElementValue{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pathElementValue{pe, v} +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. 
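// A short usage sketch for the helpers above (the identifiers below are
// hypothetical examples, not part of this file). MakePathOrDie accepts field
// names, indices and *value.FieldList keys, and PathElementValueMap associates
// individual path elements with values.
//
//	p := MakePathOrDie("spec", "containers", KeyByFields("name", "router"), "image")
//	// p.String() renders something like .spec.containers[name="router"].image
//
//	m := MakePathElementValueMap(1)
//	field := "replicas"
//	pe := PathElement{FieldName: &field}
//	m.Insert(pe, value.NewValueInterface(3))
//	v, ok := m.Get(pe) // ok is true, v wraps 3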
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].PathElement.Equals(pe) { + return s.members[loc].Value, true + } + return nil, false +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go new file mode 100644 index 000000000..cb18e7b1c --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" + + jsoniter "github.com/json-iterator/go" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var ErrUnknownPathElementType = errors.New("unknown path element type") + +const ( + // Field indicates that the content of this path element is a field's name + peField = "f" + + // Value indicates that the content of this path element is a field's value + peValue = "v" + + // Index indicates that the content of this path element is an index in an array + peIndex = "i" + + // Key indicates that the content of this path element is a key value map + peKey = "k" + + // Separator separates the type of a path element from the contents + peSeparator = ":" +) + +var ( + peFieldSepBytes = []byte(peField + peSeparator) + peValueSepBytes = []byte(peValue + peSeparator) + peIndexSepBytes = []byte(peIndex + peSeparator) + peKeySepBytes = []byte(peKey + peSeparator) + peSepBytes = []byte(peSeparator) +) + +// DeserializePathElement parses a serialized path element +func DeserializePathElement(s string) (PathElement, error) { + b := []byte(s) + if len(b) < 2 { + return PathElement{}, errors.New("key must be 2 characters long:") + } + typeSep, b := b[:2], b[2:] + if typeSep[1] != peSepBytes[0] { + return PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch typeSep[0] { + case peFieldSepBytes[0]: + // Slice s rather than convert b, to save on + // allocations. 
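// For reference, a sketch of the wire format handled here (examples are made
// up but follow the prefix constants above): each serialized path element is
// a one-letter type prefix, a colon, and the content.
//
//	"f:metadata"              // FieldName "metadata"
//	"i:2"                     // Index 2
//	"v:\"tcp\""               // Value, JSON-encoded
//	"k:{\"name\":\"router\"}" // Key, a JSON object of the key fields
//
// SerializePathElement and DeserializePathElement round-trip these strings.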
+ str := s[2:] + return PathElement{ + FieldName: &str, + }, nil + case peValueSepBytes[0]: + iter := readPool.BorrowIterator(b) + defer readPool.ReturnIterator(iter) + v, err := value.ReadJSONIter(iter) + if err != nil { + return PathElement{}, err + } + return PathElement{Value: &v}, nil + case peKeySepBytes[0]: + iter := readPool.BorrowIterator(b) + defer readPool.ReturnIterator(iter) + fields := value.FieldList{} + + iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool { + v, err := value.ReadJSONIter(iter) + if err != nil { + iter.Error = err + return false + } + fields = append(fields, value.Field{Name: key, Value: v}) + return true + }) + fields.Sort() + return PathElement{Key: &fields}, iter.Error + case peIndexSepBytes[0]: + i, err := strconv.Atoi(s[2:]) + if err != nil { + return PathElement{}, err + } + return PathElement{ + Index: &i, + }, nil + default: + return PathElement{}, ErrUnknownPathElementType + } +} + +var ( + readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool() + writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool() +) + +// SerializePathElement serializes a path element +func SerializePathElement(pe PathElement) (string, error) { + buf := strings.Builder{} + err := serializePathElementToWriter(&buf, pe) + return buf.String(), err +} + +func serializePathElementToWriter(w io.Writer, pe PathElement) error { + stream := writePool.BorrowStream(w) + defer writePool.ReturnStream(stream) + switch { + case pe.FieldName != nil: + if _, err := stream.Write(peFieldSepBytes); err != nil { + return err + } + stream.WriteRaw(*pe.FieldName) + case pe.Key != nil: + if _, err := stream.Write(peKeySepBytes); err != nil { + return err + } + stream.WriteObjectStart() + + for i, field := range *pe.Key { + if i > 0 { + stream.WriteMore() + } + stream.WriteObjectField(field.Name) + value.WriteJSONStream(field.Value, stream) + } + stream.WriteObjectEnd() + case pe.Value != nil: + if _, err := stream.Write(peValueSepBytes); err != nil { + return err + } + value.WriteJSONStream(*pe.Value, stream) + case pe.Index != nil: + if _, err := stream.Write(peIndexSepBytes); err != nil { + return err + } + stream.WriteInt(*pe.Index) + default: + return errors.New("invalid PathElement") + } + b := stream.Buffer() + err := stream.Flush() + // Help jsoniter manage its buffers--without this, the next + // use of the stream is likely to require an allocation. Look + // at the jsoniter stream code to understand why. They were probably + // optimizing for folks using the buffer directly. + stream.SetBuffer(b[:0]) + return err +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go new file mode 100644 index 000000000..b992b93c5 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go @@ -0,0 +1,238 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldpath + +import ( + "bytes" + "io" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +func (s *Set) ToJSON() ([]byte, error) { + buf := bytes.Buffer{} + err := s.ToJSONStream(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (s *Set) ToJSONStream(w io.Writer) error { + stream := writePool.BorrowStream(w) + defer writePool.ReturnStream(stream) + + var r reusableBuilder + + stream.WriteObjectStart() + err := s.emitContentsV1(false, stream, &r) + if err != nil { + return err + } + stream.WriteObjectEnd() + return stream.Flush() +} + +func manageMemory(stream *jsoniter.Stream) error { + // Help jsoniter manage its buffers--without this, it does a bunch of + // alloctaions that are not necessary. They were probably optimizing + // for folks using the buffer directly. + b := stream.Buffer() + if len(b) > 4096 || cap(b)-len(b) < 2048 { + if err := stream.Flush(); err != nil { + return err + } + stream.SetBuffer(b[:0]) + } + return nil +} + +type reusableBuilder struct { + bytes.Buffer +} + +func (r *reusableBuilder) unsafeString() string { + b := r.Bytes() + return *(*string)(unsafe.Pointer(&b)) +} + +func (r *reusableBuilder) reset() *bytes.Buffer { + r.Reset() + return &r.Buffer +} + +func (s *Set) emitContentsV1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error { + mi, ci := 0, 0 + first := true + preWrite := func() { + if first { + first = false + return + } + stream.WriteMore() + } + + if includeSelf && !(len(s.Members.members) == 0 && len(s.Children.members) == 0) { + preWrite() + stream.WriteObjectField(".") + stream.WriteEmptyObject() + } + + for mi < len(s.Members.members) && ci < len(s.Children.members) { + mpe := s.Members.members[mi] + cpe := s.Children.members[ci].pathElement + + if c := mpe.Compare(cpe); c < 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } else if c > 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } else { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(true, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + mi++ + ci++ + } + } + + for mi < len(s.Members.members) { + mpe := s.Members.members[mi] + + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } + + for ci < len(s.Children.members) { + cpe := s.Children.members[ci].pathElement + + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } + + return manageMemory(stream) +} + +// FromJSON clears s and reads a JSON formatted set structure. +func (s *Set) FromJSON(r io.Reader) error { + // The iterator pool is completely useless for memory management, grrr. 
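	// For orientation (illustrative, mirroring emitContentsV1 in this file): the
	// JSON read here is an object keyed by serialized path elements, where "."
	// marks a node that is a member as well as a parent, e.g.
	//
	//	{"f:metadata":{"f:name":{}},"f:spec":{".":{},"f:replicas":{}}}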
+ iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096) + + found, _ := readIterV1(iter) + if found == nil { + *s = Set{} + } else { + *s = *found + } + return iter.Error +} + +// returns true if this subtree is also (or only) a member of parent; s is nil +// if there are no further children. +func readIterV1(iter *jsoniter.Iterator) (children *Set, isMember bool) { + iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { + if key == "." { + isMember = true + iter.Skip() + return true + } + pe, err := DeserializePathElement(key) + if err == ErrUnknownPathElementType { + // Ignore these-- a future version maybe knows what + // they are. We drop these completely rather than try + // to preserve things we don't understand. + iter.Skip() + return true + } else if err != nil { + iter.ReportError("parsing key as path element", err.Error()) + iter.Skip() + return true + } + grandchildren, childIsMember := readIterV1(iter) + if childIsMember { + if children == nil { + children = &Set{} + } + m := &children.Members.members + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + appendOK := len(*m) == 0 || (*m)[len(*m)-1].Less(pe) + if appendOK { + *m = append(*m, pe) + } else { + children.Members.Insert(pe) + } + } + if grandchildren != nil { + if children == nil { + children = &Set{} + } + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + m := &children.Children.members + appendOK := len(*m) == 0 || (*m)[len(*m)-1].pathElement.Less(pe) + if appendOK { + *m = append(*m, setNode{pe, grandchildren}) + } else { + *children.Children.Descend(pe) = *grandchildren + } + } + return true + }) + if children == nil { + isMember = true + } + + return children, isMember +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go new file mode 100644 index 000000000..029c2b600 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go @@ -0,0 +1,406 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "sort" + "strings" +) + +// Set identifies a set of fields. +type Set struct { + // Members lists fields that are part of the set. + // TODO: will be serialized as a list of path elements. + Members PathElementSet + + // Children lists child fields which themselves have children that are + // members of the set. Appearance in this list does not imply membership. + // Note: this is a tree, not an arbitrary graph. + Children SetNodeMap +} + +// NewSet makes a set from a list of paths. +func NewSet(paths ...Path) *Set { + s := &Set{} + for _, p := range paths { + s.Insert(p) + } + return s +} + +// Insert adds the field identified by `p` to the set. Important: parent fields +// are NOT added to the set; if that is desired, they must be added separately. 
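//
// A minimal sketch (assuming Path is the []PathElement slice defined in
// path.go, which is not part of this hunk):
//
//	spec, replicas := "spec", "replicas"
//	s := NewSet()
//	s.Insert(Path{{FieldName: &spec}, {FieldName: &replicas}})
//	// s.Has(Path{{FieldName: &spec}}) == false; only the leaf is a member.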
+func (s *Set) Insert(p Path) { + if len(p) == 0 { + // Zero-length path identifies the entire object; we don't + // track top-level ownership. + return + } + for { + if len(p) == 1 { + s.Members.Insert(p[0]) + return + } + s = s.Children.Descend(p[0]) + p = p[1:] + } +} + +// Union returns a Set containing elements which appear in either s or s2. +func (s *Set) Union(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Union(&s2.Members), + Children: *s.Children.Union(&s2.Children), + } +} + +// Intersection returns a Set containing leaf elements which appear in both s +// and s2. Intersection can be constructed from Union and Difference operations +// (example in the tests) but it's much faster to do it in one pass. +func (s *Set) Intersection(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Intersection(&s2.Members), + Children: *s.Children.Intersection(&s2.Children), + } +} + +// Difference returns a Set containing elements which: +// * appear in s +// * do not appear in s2 +// +// In other words, for leaf fields, this acts like a regular set difference +// operation. When non leaf fields are compared with leaf fields ("parents" +// which contain "children"), the effect is: +// * parent - child = parent +// * child - parent = {empty set} +func (s *Set) Difference(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Difference(&s2.Members), + Children: *s.Children.Difference(s2), + } +} + +// RecursiveDifference returns a Set containing elements which: +// * appear in s +// * do not appear in s2 +// +// Compared to a regular difference, +// this removes every field **and its children** from s that is contained in s2. +// +// For example, with s containing `a.b.c` and s2 containing `a.b`, +// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed. +func (s *Set) RecursiveDifference(s2 *Set) *Set { + return &Set{ + Members: *s.Members.Difference(&s2.Members), + Children: *s.Children.RecursiveDifference(s2), + } +} + +// Size returns the number of members of the set. +func (s *Set) Size() int { + return s.Members.Size() + s.Children.Size() +} + +// Empty returns true if there are no members of the set. It is a separate +// function from Size since it's common to check whether size > 0, and +// potentially much faster to return as soon as a single element is found. +func (s *Set) Empty() bool { + if s.Members.Size() > 0 { + return false + } + return s.Children.Empty() +} + +// Has returns true if the field referenced by `p` is a member of the set. +func (s *Set) Has(p Path) bool { + if len(p) == 0 { + // No one owns "the entire object" + return false + } + for { + if len(p) == 1 { + return s.Members.Has(p[0]) + } + var ok bool + s, ok = s.Children.Get(p[0]) + if !ok { + return false + } + p = p[1:] + } +} + +// Equals returns true if s and s2 have exactly the same members. +func (s *Set) Equals(s2 *Set) bool { + return s.Members.Equals(&s2.Members) && s.Children.Equals(&s2.Children) +} + +// String returns the set one element per line. +func (s *Set) String() string { + elements := []string{} + s.Iterate(func(p Path) { + elements = append(elements, p.String()) + }) + return strings.Join(elements, "\n") +} + +// Iterate calls f once for each field that is a member of the set (preorder +// DFS). The path passed to f will be reused so make a copy if you wish to keep +// it. 
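//
// For example (illustrative):
//
//	leaves := []string{}
//	s.Iterate(func(p Path) {
//		leaves = append(leaves, p.String())
//	})
//
// collects one string per leaf member; p itself must not be retained.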
+func (s *Set) Iterate(f func(Path)) { + s.iteratePrefix(Path{}, f) +} + +func (s *Set) iteratePrefix(prefix Path, f func(Path)) { + s.Members.Iterate(func(pe PathElement) { f(append(prefix, pe)) }) + s.Children.iteratePrefix(prefix, f) +} + +// WithPrefix returns the subset of paths which begin with the given prefix, +// with the prefix not included. +func (s *Set) WithPrefix(pe PathElement) *Set { + subset, ok := s.Children.Get(pe) + if !ok { + return NewSet() + } + return subset +} + +// setNode is a pair of PathElement / Set, for the purpose of expressing +// nested set membership. +type setNode struct { + pathElement PathElement + set *Set +} + +// SetNodeMap is a map of PathElement to subset. +type SetNodeMap struct { + members sortedSetNode +} + +type sortedSetNode []setNode + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (s sortedSetNode) Len() int { return len(s) } +func (s sortedSetNode) Less(i, j int) bool { return s[i].pathElement.Less(s[j].pathElement) } +func (s sortedSetNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Descend adds pe to the set if necessary, returning the associated subset. +func (s *SetNodeMap) Descend(pe PathElement) *Set { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, setNode{pathElement: pe, set: &Set{}}) + return s.members[loc].set + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set + } + s.members = append(s.members, setNode{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = setNode{pathElement: pe, set: &Set{}} + return s.members[loc].set +} + +// Size returns the sum of the number of members of all subsets. +func (s *SetNodeMap) Size() int { + count := 0 + for _, v := range s.members { + count += v.set.Size() + } + return count +} + +// Empty returns false if there's at least one member in some child set. +func (s *SetNodeMap) Empty() bool { + for _, n := range s.members { + if !n.set.Empty() { + return false + } + } + return true +} + +// Get returns (the associated set, true) or (nil, false) if there is none. +func (s *SetNodeMap) Get(pe PathElement) (*Set, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set, true + } + return nil, false +} + +// Equals returns true if s and s2 have the same structure (same nested +// child sets). +func (s *SetNodeMap) Equals(s2 *SetNodeMap) bool { + if len(s.members) != len(s2.members) { + return false + } + for i := range s.members { + if !s.members[i].pathElement.Equals(s2.members[i].pathElement) { + return false + } + if !s.members[i].set.Equals(s2.members[i].set) { + return false + } + } + return true +} + +// Union returns a SetNodeMap with members that appear in either s or s2. 
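//
// Because members are kept sorted by path element, this (like Intersection and
// Difference below) is a linear merge of two sorted slices. At the Set level
// (illustrative):
//
//	x, y := "x", "y"
//	a := NewSet(Path{{FieldName: &x}})
//	b := NewSet(Path{{FieldName: &y}})
//	u := a.Union(b) // u contains both x and y; a and b are not modified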
+func (s *SetNodeMap) Union(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + out.members = append(out.members, s.members[i]) + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set.Union(s2.members[j].set)}) + i++ + } else { + out.members = append(out.members, s2.members[j]) + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) + } + return out +} + +// Intersection returns a SetNodeMap with members that appear in both s and s2. +func (s *SetNodeMap) Intersection(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + res := s.members[i].set.Intersection(s2.members[j].set) + if !res.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: res}) + } + i++ + } + j++ + } + } + return out +} + +// Difference returns a SetNodeMap with members that appear in s but not in s2. +func (s *SetNodeMap) Difference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + + diff := s.members[i].set.Difference(s2.Children.members[j].set) + // We aren't permitted to add nodes with no elements. + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + return out +} + +// RecursiveDifference returns a SetNodeMap with members that appear in s but not in s2. +// +// Compared to a regular difference, +// this removes every field **and its children** from s that is contained in s2. +// +// For example, with s containing `a.b.c` and s2 containing `a.b`, +// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed. 
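//
// Contrasted with the plain difference (an illustrative sketch at the Set level):
//
//	a, b, c := "a", "b", "c"
//	abc := NewSet(Path{{FieldName: &a}, {FieldName: &b}, {FieldName: &c}})
//	ab := NewSet(Path{{FieldName: &a}, {FieldName: &b}})
//	abc.Difference(ab)          // still contains a.b.c: the shallower leaf a.b does not erase the deeper path
//	abc.RecursiveDifference(ab) // a.b and everything beneath it is dropped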
+func (s *SetNodeMap) RecursiveDifference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + } + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + diff := s.members[i].set.RecursiveDifference(s2.Children.members[j].set) + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + } + i++ + } + j++ + } + } + + if i < len(s.members) { + for _, c := range s.members[i:] { + if !s2.Members.Has(c.pathElement) { + out.members = append(out.members, c) + } + } + } + + return out +} + +// Iterate calls f for each PathElement in the set. +func (s *SetNodeMap) Iterate(f func(PathElement)) { + for _, n := range s.members { + f(n.pathElement) + } +} + +func (s *SetNodeMap) iteratePrefix(prefix Path, f func(Path)) { + for _, n := range s.members { + pe := n.pathElement + n.set.iteratePrefix(append(prefix, pe), f) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 000000000..75a492d8e --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. +func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. 
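	// The aggregated message reads roughly like this (illustrative; the exact
	// path rendering comes from fieldpath.Path's String method):
	//
	//	conflicts with "deploy-controller":
	//	- .spec.paused
	//	conflicts with "kubectl":
	//	- .spec.replicas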
+ sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. +func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p, + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 000000000..73e0ec73f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,329 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + enableUnions bool +} + +// EnableUnionFeature turns on union handling. It is disabled by default until the +// feature is complete. 
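// As an aside, a minimal implementation of the Converter interface defined
// above, for a type that exists in only one version, might look like this
// (an illustrative sketch only):
//
//	type identityConverter struct{}
//
//	func (identityConverter) Convert(v *typed.TypedValue, _ fieldpath.APIVersion) (*typed.TypedValue, error) {
//		return v, nil // nothing to convert when only one version exists
//	}
//
//	func (identityConverter) IsMissingVersionError(error) bool { return false }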
+func (s *Updater) EnableUnionFeature() { + s.enableUnions = true +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
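//
// A minimal calling sketch (illustrative; it assumes APIVersion is the
// string-like version tag defined in the fieldpath package, reuses the
// identityConverter sketch above, and elides error handling):
//
//	u := &Updater{Converter: identityConverter{}}
//	newObj, managed, err := u.Update(live, merged, fieldpath.APIVersion("v1"), managed, "my-controller")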
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + newObject, err = liveObject.NormalizeUnions(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. If the object hasn't changed, nil is returned (the +// managers can still have changed though). +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + configObject, err = configObject.NormalizeUnionsApply(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + if s.enableUnions { + newObject, err = configObject.NormalizeUnionsApply(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
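		// Note: Set.RecursiveDifference returns a new set rather than mutating
		// its receiver, so the result of the call below is currently discarded;
		// that is what the TODO above is asking about.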
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if compare.IsSame() { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + pruned := convertedMerged.RemoveItems(lastSet.Set()) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + for version, managed := range managedAtVersion { + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + continue + } + return nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + continue + } + return nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + pruned = merged.RemoveItems(mergedSet.Difference(prunedSet.Union(managed))) + } + return pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. 
This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(lastSet.Set())), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. +// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go new file mode 100644 index 000000000..9081ccbc7 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package schema defines a targeted schema language which allows one to +// represent all the schema information necessary to perform "structured" +// merges and diffs. +// +// Due to the targeted nature of the data model, the schema language can fit in +// just a few hundred lines of go code, making it much more understandable and +// concise than e.g. OpenAPI. +// +// This schema was derived by observing the API objects used by Kubernetes, and +// formalizing a model which allows certain operations ("apply") to be more +// well defined. It is currently missing one feature: one-of ("unions"). 
+package schema diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go new file mode 100644 index 000000000..01103b38a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -0,0 +1,261 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "sync" + +// Schema is a list of named types. +// +// Schema types are indexed in a map before the first search so this type +// should be considered immutable. +type Schema struct { + Types []TypeDef `yaml:"types,omitempty"` + + once sync.Once + m map[string]TypeDef +} + +// A TypeSpecifier references a particular type in a schema. +type TypeSpecifier struct { + Type TypeRef `yaml:"type,omitempty"` + Schema Schema `yaml:"schema,omitempty"` +} + +// TypeDef represents a named type in a schema. +type TypeDef struct { + // Top level types should be named. Every type must have a unique name. + Name string `yaml:"name,omitempty"` + + Atom `yaml:"atom,omitempty,inline"` +} + +// TypeRef either refers to a named type or declares an inlined type. +type TypeRef struct { + // Either the name or one member of Atom should be set. + NamedType *string `yaml:"namedType,omitempty"` + Inlined Atom `yaml:",inline,omitempty"` +} + +// Atom represents the smallest possible pieces of the type system. +// Each set field in the Atom represents a possible type for the object. +// If none of the fields are set, any object will fail validation against the atom. +type Atom struct { + *Scalar `yaml:"scalar,omitempty"` + *List `yaml:"list,omitempty"` + *Map `yaml:"map,omitempty"` +} + +// Scalar (AKA "primitive") represents a type which has a single value which is +// either numeric, string, or boolean. +// +// TODO: split numeric into float/int? Something even more fine-grained? +type Scalar string + +const ( + Numeric = Scalar("numeric") + String = Scalar("string") + Boolean = Scalar("boolean") +) + +// ElementRelationship is an enum of the different possible relationships +// between the elements of container types (maps, lists). +type ElementRelationship string + +const ( + // Associative only applies to lists (see the documentation there). + Associative = ElementRelationship("associative") + // Atomic makes container types (lists, maps) behave + // as scalars / leaf fields + Atomic = ElementRelationship("atomic") + // Separable means the items of the container type have no particular + // relationship (default behavior for maps). + Separable = ElementRelationship("separable") +) + +// Map is a key-value pair. Its default semantics are the same as an +// associative list, but: +// * It is serialized differently: +// map: {"k": {"value": "v"}} +// list: [{"key": "k", "value": "v"}] +// * Keys must be string typed. +// * Keys can't have multiple components. 
+// +// Optionally, maps may be atomic (for example, imagine representing an RGB +// color value--it doesn't make sense to have different actors own the R and G +// values). +// +// Maps may also represent a type which is composed of a number of different fields. +// Each field has a name and a type. +// +// Fields are indexed in a map before the first search so this type +// should be considered immutable. +type Map struct { + // Each struct field appears exactly once in this list. The order in + // this list defines the canonical field ordering. + Fields []StructField `yaml:"fields,omitempty"` + + // A Union is a grouping of fields with special rules. It may refer to + // one or more fields in the above list. A given field from the above + // list may be referenced in exactly 0 or 1 places in the below list. + // One can have multiple unions in the same struct, but the fields can't + // overlap between unions. + Unions []Union `yaml:"unions,omitempty"` + + // ElementType is the type of the structs's unknown fields. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the map's items. + // * `separable` (or unset) implies that each element is 100% independent. + // * `atomic` implies that all elements depend on each other, and this + // is effectively a scalar / leaf field; it doesn't make sense for + // separate actors to set the elements. Example: an RGB color struct; + // it would never make sense to "own" only one component of the + // color. + // The default behavior for maps is `separable`; it's permitted to + // leave this unset to get the default behavior. + ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + once sync.Once + m map[string]StructField +} + +// FindField is a convenience function that returns the referenced StructField, +// if it exists, or (nil, false) if it doesn't. +func (m *Map) FindField(name string) (StructField, bool) { + m.once.Do(func() { + m.m = make(map[string]StructField, len(m.Fields)) + for _, field := range m.Fields { + m.m[field.Name] = field + } + }) + sf, ok := m.m[name] + return sf, ok +} + +// UnionFields are mapping between the fields that are part of the union and +// their discriminated value. The discriminated value has to be set, and +// should not conflict with other discriminated value in the list. +type UnionField struct { + // FieldName is the name of the field that is part of the union. This + // is the serialized form of the field. + FieldName string `yaml:"fieldName"` + // Discriminatorvalue is the value of the discriminator to + // select that field. If the union doesn't have a discriminator, + // this field is ignored. + DiscriminatorValue string `yaml:"discriminatorValue"` +} + +// Union, or oneof, means that only one of multiple fields of a structure can be +// set at a time. Setting the discriminator helps clearing oher fields: +// - If discriminator changed to non-nil, and a new field has been added +// that doesn't match, an error is returned, +// - If discriminator hasn't changed and two fields or more are set, an +// error is returned, +// - If discriminator changed to non-nil, all other fields but the +// discriminated one will be cleared, +// - Otherwise, If only one field is left, update discriminator to that value. +type Union struct { + // Discriminator, if present, is the name of the field that + // discriminates fields in the union. 
The mapping between the value of + // the discriminator and the field is done by using the Fields list + // below. + Discriminator *string `yaml:"discriminator,omitempty"` + + // DeduceInvalidDiscriminator indicates if the discriminator + // should be updated automatically based on the fields set. This + // typically defaults to false since we don't want to deduce by + // default (the behavior exists to maintain compatibility on + // existing types and shouldn't be used for new types). + DeduceInvalidDiscriminator bool `yaml:"deduceInvalidDiscriminator,omitempty"` + + // This is the list of fields that belong to this union. All the + // fields present in here have to be part of the parent + // structure. Discriminator (if oneOf has one), is NOT included in + // this list. The value for field is how we map the name of the field + // to actual value for discriminator. + Fields []UnionField `yaml:"fields,omitempty"` +} + +// StructField pairs a field name with a field type. +type StructField struct { + // Name is the field name. + Name string `yaml:"name,omitempty"` + // Type is the field type. + Type TypeRef `yaml:"type,omitempty"` + // Default value for the field, nil if not present. + Default interface{} `yaml:"default,omitempty"` +} + +// List represents a type which contains a zero or more elements, all of the +// same subtype. Lists may be either associative: each element is more or less +// independent and could be managed by separate entities in the system; or +// atomic, where the elements are heavily dependent on each other: it is not +// sensible to change one element without considering the ramifications on all +// the other elements. +type List struct { + // ElementType is the type of the list's elements. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the list's elements + // and must have one of these values: + // * `atomic`: the list is treated as a single entity, like a scalar. + // * `associative`: + // - If the list element is a scalar, the list is treated as a set. + // - If the list element is a map, the list is treated as a map. + // There is no default for this value for lists; all schemas must + // explicitly state the element relationship for all lists. + ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + // Iff ElementRelationship is `associative`, and the element type is + // map, then Keys must have non-zero length, and it lists the fields + // of the element's map type which are to be used as the keys of the + // list. + // + // TODO: change this to "non-atomic struct" above and make the code reflect this. + // + // Each key must refer to a single field name (no nesting, not JSONPath). + Keys []string `yaml:"keys,omitempty"` +} + +// FindNamedType is a convenience function that returns the referenced TypeDef, +// if it exists, or (nil, false) if it doesn't. +func (s *Schema) FindNamedType(name string) (TypeDef, bool) { + s.once.Do(func() { + s.m = make(map[string]TypeDef, len(s.Types)) + for _, t := range s.Types { + s.m[t.Name] = t + } + }) + t, ok := s.m[name] + return t, ok +} + +// Resolve is a convenience function which returns the atom referenced, whether +// it is inline or named. Returns (Atom{}, false) if the type can't be resolved. +// +// This allows callers to not care about the difference between a (possibly +// inlined) reference and a definition. 
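//
// For example (illustrative):
//
//	str := String
//	s := &Schema{Types: []TypeDef{{Name: "s", Atom: Atom{Scalar: &str}}}}
//	name := "s"
//	a, ok := s.Resolve(TypeRef{NamedType: &name}) // ok == true and a.Scalar != nil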
+func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + return t.Atom, true + } + return tr.Inlined, true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go new file mode 100644 index 000000000..4c303eecc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go @@ -0,0 +1,199 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "reflect" + +// Equals returns true iff the two Schemas are equal. +func (a *Schema) Equals(b *Schema) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + + if len(a.Types) != len(b.Types) { + return false + } + for i := range a.Types { + if !a.Types[i].Equals(&b.Types[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two TypeRefs are equal. +// +// Note that two typerefs that have an equivalent type but where one is +// inlined and the other is named, are not considered equal. +func (a *TypeRef) Equals(b *TypeRef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.NamedType == nil) != (b.NamedType == nil) { + return false + } + if a.NamedType != nil { + if *a.NamedType != *b.NamedType { + return false + } + //return true + } + return a.Inlined.Equals(&b.Inlined) +} + +// Equals returns true iff the two TypeDefs are equal. +func (a *TypeDef) Equals(b *TypeDef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + return a.Atom.Equals(&b.Atom) +} + +// Equals returns true iff the two Atoms are equal. +func (a *Atom) Equals(b *Atom) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Scalar == nil) != (b.Scalar == nil) { + return false + } + if (a.List == nil) != (b.List == nil) { + return false + } + if (a.Map == nil) != (b.Map == nil) { + return false + } + switch { + case a.Scalar != nil: + return *a.Scalar == *b.Scalar + case a.List != nil: + return a.List.Equals(b.List) + case a.Map != nil: + return a.Map.Equals(b.Map) + } + return true +} + +// Equals returns true iff the two Maps are equal. +func (a *Map) Equals(b *Map) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + if len(a.Unions) != len(b.Unions) { + return false + } + for i := range a.Unions { + if !a.Unions[i].Equals(&b.Unions[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two Unions are equal. 
+func (a *Union) Equals(b *Union) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Discriminator == nil) != (b.Discriminator == nil) { + return false + } + if a.Discriminator != nil { + if *a.Discriminator != *b.Discriminator { + return false + } + } + if a.DeduceInvalidDiscriminator != b.DeduceInvalidDiscriminator { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two UnionFields are equal. +func (a *UnionField) Equals(b *UnionField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.FieldName != b.FieldName { + return false + } + if a.DiscriminatorValue != b.DiscriminatorValue { + return false + } + return true +} + +// Equals returns true iff the two StructFields are equal. +func (a *StructField) Equals(b *StructField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + if !reflect.DeepEqual(a.Default, b.Default) { + return false + } + return a.Type.Equals(&b.Type) +} + +// Equals returns true iff the two Lists are equal. +func (a *List) Equals(b *List) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Keys) != len(b.Keys) { + return false + } + for i := range a.Keys { + if a.Keys[i] != b.Keys[i] { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go new file mode 100644 index 000000000..bb60e2a5f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// SchemaSchemaYAML is a schema against which you can validate other schemas. +// It will validate itself. It can be unmarshalled into a Schema type. 
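//
// For instance (illustrative; this assumes a YAML decoder that honours the
// `yaml` struct tags, such as gopkg.in/yaml.v2):
//
//	var s Schema
//	err := yaml.Unmarshal([]byte(SchemaSchemaYAML), &s)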
+var SchemaSchemaYAML = `types: +- name: schema + map: + fields: + - name: types + type: + list: + elementRelationship: associative + elementType: + namedType: typeDef + keys: + - name +- name: typeDef + map: + fields: + - name: name + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: typeRef + map: + fields: + - name: namedType + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: scalar + scalar: string +- name: map + map: + fields: + - name: fields + type: + list: + elementType: + namedType: structField + elementRelationship: associative + keys: [ "name" ] + - name: unions + type: + list: + elementType: + namedType: union + elementRelationship: atomic + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string +- name: unionField + map: + fields: + - name: fieldName + type: + scalar: string + - name: discriminatorValue + type: + scalar: string +- name: union + map: + fields: + - name: discriminator + type: + scalar: string + - name: deduceInvalidDiscriminator + type: + scalar: bool + - name: fields + type: + list: + elementRelationship: associative + elementType: + namedType: unionField + keys: + - fieldName +- name: structField + map: + fields: + - name: name + type: + scalar: string + - name: type + type: + namedType: typeRef + - name: default + type: + namedType: __untyped_atomic_ +- name: list + map: + fields: + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string + - name: keys + type: + list: + elementType: + scalar: string +- name: untyped + map: + fields: + - name: elementRelationship + type: + scalar: string +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +` diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go new file mode 100644 index 000000000..ca4e60542 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package typed contains logic for operating on values with given schemas. +package typed diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go new file mode 100644 index 000000000..6b2b2cb4a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -0,0 +1,256 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "errors" + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// ValidationError reports an error about a particular field +type ValidationError struct { + Path string + ErrorMessage string +} + +// Error returns a human readable error message. +func (ve ValidationError) Error() string { + if len(ve.Path) == 0 { + return ve.ErrorMessage + } + return fmt.Sprintf("%s: %v", ve.Path, ve.ErrorMessage) +} + +// ValidationErrors accumulates multiple validation error messages. +type ValidationErrors []ValidationError + +// Error returns a human readable error message reporting each error in the +// list. +func (errs ValidationErrors) Error() string { + if len(errs) == 1 { + return errs[0].Error() + } + messages := []string{"errors:"} + for _, e := range errs { + messages = append(messages, " "+e.Error()) + } + return strings.Join(messages, "\n") +} + +// Set the given path to all the validation errors. +func (errs ValidationErrors) WithPath(p string) ValidationErrors { + for i := range errs { + errs[i].Path = p + } + return errs +} + +// WithPrefix prefixes all errors path with the given pathelement. This +// is useful when unwinding the stack on errors. +func (errs ValidationErrors) WithPrefix(prefix string) ValidationErrors { + for i := range errs { + errs[i].Path = prefix + errs[i].Path + } + return errs +} + +// WithLazyPrefix prefixes all errors path with the given pathelement. +// This is useful when unwinding the stack on errors. Prefix is +// computed lazily only if there is an error. +func (errs ValidationErrors) WithLazyPrefix(fn func() string) ValidationErrors { + if len(errs) == 0 { + return errs + } + prefix := "" + if fn != nil { + prefix = fn() + } + for i := range errs { + errs[i].Path = prefix + errs[i].Path + } + return errs +} + +func errorf(format string, args ...interface{}) ValidationErrors { + return ValidationErrors{{ + ErrorMessage: fmt.Sprintf(format, args...), + }} +} + +type atomHandler interface { + doScalar(*schema.Scalar) ValidationErrors + doList(*schema.List) ValidationErrors + doMap(*schema.Map) ValidationErrors +} + +func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors { + a, ok := s.Resolve(tr) + if !ok { + return errorf("schema error: no type found matching: %v", *tr.NamedType) + } + + a = deduceAtom(a, v) + return handleAtom(a, tr, ah) +} + +// deduceAtom determines which of the possible types in atom 'atom' applies to value 'val'. +// If val is of a type allowed by atom, return a copy of atom with all other types set to nil. +// if val is nil, or is not of a type allowed by atom, just return the original atom, +// and validation will fail at a later stage. 
(with a more useful error) +func deduceAtom(atom schema.Atom, val value.Value) schema.Atom { + switch { + case val == nil: + case val.IsFloat(), val.IsInt(), val.IsString(), val.IsBool(): + if atom.Scalar != nil { + return schema.Atom{Scalar: atom.Scalar} + } + case val.IsList(): + if atom.List != nil { + return schema.Atom{List: atom.List} + } + case val.IsMap(): + if atom.Map != nil { + return schema.Atom{Map: atom.Map} + } + } + return atom +} + +func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErrors { + switch { + case a.Map != nil: + return ah.doMap(a.Map) + case a.Scalar != nil: + return ah.doScalar(a.Scalar) + case a.List != nil: + return ah.doList(a.List) + } + + name := "inlined" + if tr.NamedType != nil { + name = "named type: " + *tr.NamedType + } + + return errorf("schema error: invalid atom: %v", name) +} + +// Returns the list, or an error. Reminder: nil is a valid list and might be returned. +func listValue(a value.Allocator, val value.Value) (value.List, error) { + if val.IsNull() { + // Null is a valid list. + return nil, nil + } + if !val.IsList() { + return nil, fmt.Errorf("expected list, got %v", val) + } + return val.AsListUsing(a), nil +} + +// Returns the map, or an error. Reminder: nil is a valid map and might be returned. +func mapValue(a value.Allocator, val value.Value) (value.Map, error) { + if val == nil { + return nil, fmt.Errorf("expected map, got nil") + } + if val.IsNull() { + // Null is a valid map. + return nil, nil + } + if !val.IsMap() { + return nil, fmt.Errorf("expected map, got %v", val) + } + return val.AsMapUsing(a), nil +} + +func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName string) (interface{}, error) { + atom, ok := s.Resolve(list.ElementType) + if !ok { + return nil, errors.New("invalid elementType for list") + } + if atom.Map == nil { + return nil, errors.New("associative list may not have non-map types") + } + // If the field is not found, we can assume there is no default. + field, _ := atom.Map.FindField(fieldName) + return field.Default, nil +} + +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + pe := fieldpath.PathElement{} + if child.IsNull() { + // null entries are illegal. + return pe, errors.New("associative list with keys may not have a null element") + } + if !child.IsMap() { + return pe, errors.New("associative list with keys may not have non-map elements") + } + keyMap := value.FieldList{} + m := child.AsMapUsing(a) + defer a.Free(m) + for _, fieldName := range list.Keys { + if val, ok := m.Get(fieldName); ok { + keyMap = append(keyMap, value.Field{Name: fieldName, Value: val}) + } else if def, err := getAssociativeKeyDefault(s, list, fieldName); err != nil { + return pe, fmt.Errorf("couldn't find default value for %v: %v", fieldName, err) + } else if def != nil { + keyMap = append(keyMap, value.Field{Name: fieldName, Value: value.NewValueInterface(def)}) + } else { + return pe, fmt.Errorf("associative list with keys has an element that omits key field %q (and doesn't have default value)", fieldName) + } + } + keyMap.Sort() + pe.Key = &keyMap + return pe, nil +} + +func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + pe := fieldpath.PathElement{} + switch { + case child.IsMap(): + // TODO: atomic maps should be acceptable. 
+ return pe, errors.New("associative list without keys has an element that's a map type") + case child.IsList(): + // Should we support a set of lists? For the moment + // let's say we don't. + // TODO: atomic lists should be acceptable. + return pe, errors.New("not supported: associative list with lists as elements") + case child.IsNull(): + return pe, errors.New("associative list without keys has an element that's an explicit null") + default: + // We are a set type. + pe.Value = &child + return pe, nil + } +} + +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship == schema.Associative { + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, index, child) + } + + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(list, index, child) + } + + // Use the index as a key for atomic lists. + return fieldpath.PathElement{Index: &index}, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go new file mode 100644 index 000000000..7e20f4083 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -0,0 +1,353 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "math" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +type mergingWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are merging + path fieldpath.Path + + // How to merge. Called after schema validation for all leaf fields. + rule mergeRule + + // If set, called after non-leaf items have been merged. (`out` is + // probably already set.) + postItemHook mergeRule + + // output of the merge operation (nil if none) + out *interface{} + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*mergingWalker + + allocator value.Allocator +} + +// merge rules examine w.lhs and w.rhs (up to one of which may be nil) and +// optionally set w.out. If lhs and rhs are both set, they will be of +// comparable type. +type mergeRule func(w *mergingWalker) + +var ( + ruleKeepRHS = mergeRule(func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } else if w.lhs != nil { + v := w.lhs.Unstructured() + w.out = &v + } + }) +) + +// merge sets w.out. +func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. 
+ return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + if alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf && w.postItemHook != nil { + w.postItemHook(w) + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *mergingWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + w.rule(w) +} + +func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { + errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) + errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) + if len(errs) > 0 { + return errs + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *mergingWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *mergingWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*mergingWalker{} + } + var w2 *mergingWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &mergingWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.out = nil + return w2 +} + +func (w *mergingWalker) finishDescent(w2 *mergingWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *mergingWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + out := make([]interface{}, 0, int(math.Max(float64(rLen), float64(lLen)))) + + // TODO: ordering is totally wrong. + // TODO: might as well make the map order work the same way. + + // This is a cheap hack to at least make the output order stable. + rhsOrder := make([]fieldpath.PathElement, 0, rLen) + + // First, collect all RHS children. + observedRHS := fieldpath.MakePathElementValueMap(rLen) + if rhs != nil { + for i := 0; i < rhs.Length(); i++ { + child := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if _, ok := observedRHS.Get(pe); ok { + errs = append(errs, errorf("rhs: duplicate entries for key %v", pe.String())...) + } + observedRHS.Insert(pe, child) + rhsOrder = append(rhsOrder, pe) + } + } + + // Then merge with LHS children. 
+ observedLHS := fieldpath.MakePathElementSet(lLen) + if lhs != nil { + for i := 0; i < lhs.Length(); i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if observedLHS.Has(pe) { + errs = append(errs, errorf("lhs: duplicate entries for key %v", pe.String())...) + continue + } + observedLHS.Insert(pe) + w2 := w.prepareDescent(pe, t.ElementType) + w2.lhs = value.Value(child) + if rchild, ok := observedRHS.Get(pe); ok { + w2.rhs = rchild + } + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out = append(out, *w2.out) + } + w.finishDescent(w2) + } + } + + for _, pe := range rhsOrder { + if observedLHS.Has(pe) { + continue + } + value, _ := observedRHS.Get(pe) + w2 := w.prepareDescent(pe, t.ElementType) + w2.rhs = value + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out = append(out, *w2.out) + } + w.finishDescent(w2) + } + + if len(out) > 0 { + i := interface{}(out) + w.out = &i + } + + return errs +} + +func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *mergingWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out[key] = *w2.out + } + w.finishDescent(w2) + return errs +} + +func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + if len(out) > 0 { + i := interface{}(out) + w.out = &i + } + + return errs +} + +func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go new file mode 100644 index 000000000..3949a78fc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + + yaml "gopkg.in/yaml.v2" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// YAMLObject is an object encoded in YAML. +type YAMLObject string + +// Parser implements YAMLParser and allows introspecting the schema. +type Parser struct { + Schema schema.Schema +} + +// create builds an unvalidated parser. +func create(s YAMLObject) (*Parser, error) { + p := Parser{} + err := yaml.Unmarshal([]byte(s), &p.Schema) + return &p, err +} + +func createOrDie(schema YAMLObject) *Parser { + p, err := create(schema) + if err != nil { + panic(fmt.Errorf("failed to create parser: %v", err)) + } + return p +} + +var ssParser = createOrDie(YAMLObject(schema.SchemaSchemaYAML)) + +// NewParser will build a YAMLParser from a schema. The schema is validated. +func NewParser(schema YAMLObject) (*Parser, error) { + _, err := ssParser.Type("schema").FromYAML(schema) + if err != nil { + return nil, fmt.Errorf("unable to validate schema: %v", err) + } + p, err := create(schema) + if err != nil { + return nil, err + } + return p, nil +} + +// TypeNames returns a list of types this parser understands. +func (p *Parser) TypeNames() (names []string) { + for _, td := range p.Schema.Types { + names = append(names, td.Name) + } + return names +} + +// Type returns a helper which can produce objects of the given type. Any +// errors are deferred until a further function is called. +func (p *Parser) Type(name string) ParseableType { + return ParseableType{ + Schema: &p.Schema, + TypeRef: schema.TypeRef{NamedType: &name}, + } +} + +// ParseableType allows for easy production of typed objects. +type ParseableType struct { + TypeRef schema.TypeRef + Schema *schema.Schema +} + +// IsValid return true if p's schema and typename are valid. +func (p ParseableType) IsValid() bool { + _, ok := p.Schema.Resolve(p.TypeRef) + return ok +} + +// FromYAML parses a yaml string into an object with the current schema +// and the type "typename" or an error if validation fails. 
+func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { + var v interface{} + err := yaml.Unmarshal([]byte(object), &v) + if err != nil { + return nil, err + } + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) +} + +// FromUnstructured converts a go "interface{}" type, typically an +// unstructured object in Kubernetes world, to a TypedValue. It returns an +// error if the resulting object fails schema validation. +// The provided interface{} must be one of: map[string]interface{}, +// map[interface{}]interface{}, []interface{}, int types, float types, +// string or boolean. Nested interface{} must also be one of these types. +func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +} + +// FromStructured converts a go "interface{}" type, typically an structured object in +// Kubernetes, to a TypedValue. It will return an error if the resulting object fails +// schema validation. The provided "interface{}" value must be a pointer so that the +// value can be modified via reflection. The provided "interface{}" may contain structs +// and types that are converted to Values by the jsonMarshaler interface. +func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { + v, err := value.NewValueReflect(in) + if err != nil { + return nil, fmt.Errorf("error creating struct value reflector: %v", err) + } + return AsTyped(v, p.Schema, p.TypeRef) +} + +// DeducedParseableType is a ParseableType that deduces the type from +// the content of the object. +var DeducedParseableType ParseableType = createOrDie(YAMLObject(`types: +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`)).Type("__untyped_deduced_") diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go new file mode 100644 index 000000000..5a8214ae2 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -0,0 +1,295 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "fmt" + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +var fmPool = sync.Pool{ + New: func() interface{} { return &reconcileWithSchemaWalker{} }, +} + +func (v *reconcileWithSchemaWalker) finished() { + v.fieldSet = nil + v.schema = nil + v.value = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.toRemove = nil + v.toAdd = nil + fmPool.Put(v) +} + +type reconcileWithSchemaWalker struct { + value *TypedValue // root of the live object + schema *schema.Schema // root of the live schema + + // state of node being visited by walker + fieldSet *fieldpath.Set + typeRef schema.TypeRef + path fieldpath.Path + isAtomic bool + + // the accumulated diff to perform to apply reconciliation + toRemove *fieldpath.Set // paths to remove recursively + toAdd *fieldpath.Set // paths to add after any removals + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*reconcileWithSchemaWalker +} + +func (v *reconcileWithSchemaWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *reconcileWithSchemaWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*reconcileWithSchemaWalker{} + } + var v2 *reconcileWithSchemaWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &reconcileWithSchemaWalker{} + } + *v2 = *v + v2.typeRef = tr + v2.path = append(v.path, pe) + v2.value = v.value + return v2 +} + +func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) { + v2.fieldSet = nil + v2.schema = nil + v2.value = nil + v2.typeRef = schema.TypeRef{} + if cap(v2.path) < 20 { // recycle slices that do not have unexpectedly high capacity + v2.path = v2.path[:0] + } else { + v2.path = nil + } + + // merge any accumulated changes into parent walker + if v2.toRemove != nil { + if v.toRemove == nil { + v.toRemove = v2.toRemove + } else { + v.toRemove = v.toRemove.Union(v2.toRemove) + } + } + if v2.toAdd != nil { + if v.toAdd == nil { + v.toAdd = v2.toAdd + } else { + v.toAdd = v.toAdd.Union(v2.toAdd) + } + } + v2.toRemove = nil + v2.toAdd = nil + + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +// ReconcileFieldSetWithSchema reconciles the a field set with any changes to the +//// object's schema since the field set was written. Returns the reconciled field set, or nil of +// no changes were made to the field set. +// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fieldpath.Set, error) { + v := fmPool.Get().(*reconcileWithSchemaWalker) + v.fieldSet = fieldset + v.value = tv + + v.schema = tv.schema + v.typeRef = tv.typeRef + + // We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced + // types do not support atomic or granular tags. Nor does the dynamic schema deduction + // interact well with the reconcile logic. 
+ if v.schema == DeducedParseableType.Schema { + return nil, nil + } + + defer v.finished() + errs := v.reconcile() + + if len(errs) > 0 { + return nil, fmt.Errorf("errors reconciling field set with schema: %s", errs.Error()) + } + + // If there are any accumulated changes, apply them + if v.toAdd != nil || v.toRemove != nil { + out := v.fieldSet + if v.toRemove != nil { + out = out.RecursiveDifference(v.toRemove) + } + if v.toAdd != nil { + out = out.Union(v.toAdd) + } + return out, nil + } + return nil, nil +} + +func (v *reconcileWithSchemaWalker) reconcile() (errs ValidationErrors) { + a, ok := v.schema.Resolve(v.typeRef) + if !ok { + errs = append(errs, errorf("could not resolve %v", v.typeRef)...) + return + } + return handleAtom(a, v.typeRef, v) +} + +func (v *reconcileWithSchemaWalker) doScalar(_ *schema.Scalar) (errs ValidationErrors) { + return errs +} + +func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fieldpath.Set) (errs ValidationErrors) { + handleElement := func(pe fieldpath.PathElement, isMember bool) { + var hasChildren bool + v2 := v.prepareDescent(pe, t.ElementType) + v2.fieldSet, hasChildren = element.Children.Get(pe) + v2.isAtomic = isMember && !hasChildren + errs = append(errs, v2.reconcile()...) + v.finishDescent(v2) + } + element.Children.Iterate(func(pe fieldpath.PathElement) { + if element.Members.Has(pe) { + return + } + handleElement(pe, false) + }) + element.Members.Iterate(func(pe fieldpath.PathElement) { + handleElement(pe, true) + }) + return errs +} + +func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) { + // reconcile lists changed from granular to atomic + if !v.isAtomic && t.ElementRelationship == schema.Atomic { + v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields + v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic + return errs + } + // reconcile lists changed from atomic to granular + if v.isAtomic && t.ElementRelationship == schema.Associative { + v.toAdd, errs = buildGranularFieldSet(v.path, v.value) + if errs != nil { + return errs + } + } + if v.fieldSet != nil { + errs = v.visitListItems(t, v.fieldSet) + } + return errs +} + +func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldpath.Set) (errs ValidationErrors) { + handleElement := func(pe fieldpath.PathElement, isMember bool) { + var hasChildren bool + if tr, ok := typeRefAtPath(t, pe); ok { // ignore fields not in the schema + v2 := v.prepareDescent(pe, tr) + v2.fieldSet, hasChildren = element.Children.Get(pe) + v2.isAtomic = isMember && !hasChildren + errs = append(errs, v2.reconcile()...) 
+ v.finishDescent(v2) + } + } + element.Children.Iterate(func(pe fieldpath.PathElement) { + if element.Members.Has(pe) { + return + } + handleElement(pe, false) + }) + element.Members.Iterate(func(pe fieldpath.PathElement) { + handleElement(pe, true) + }) + + return errs +} + +func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) { + // reconcile maps and structs changed from granular to atomic + if !v.isAtomic && t.ElementRelationship == schema.Atomic { + if v.fieldSet != nil && v.fieldSet.Size() > 0 { + v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields + v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic + } + return errs + } + // reconcile maps changed from atomic to granular + if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") { + v.toAdd, errs = buildGranularFieldSet(v.path, v.value) + if errs != nil { + return errs + } + } + if v.fieldSet != nil { + errs = v.visitMapItems(t, v.fieldSet) + } + return errs +} + +func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) { + + valueFieldSet, err := value.ToFieldSet() + if err != nil { + return nil, errorf("toFieldSet: %v", err) + } + if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok { + result := fieldpath.NewSet(path) + resultAtPath := descendToPath(result, path) + *resultAtPath = *valueFieldSetAtPath + return result, nil + } + return nil, nil +} + +func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) { + ok := true + for _, pe := range path { + if node, ok = node.Children.Get(pe); !ok { + break + } + } + return node, ok +} + +func descendToPath(node *fieldpath.Set, path fieldpath.Path) *fieldpath.Set { + for _, pe := range path { + node = node.Children.Descend(pe) + } + return node +} + +func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, bool) { + tr := t.ElementType + if pe.FieldName != nil { + if sf, ok := t.FindField(*pe.FieldName); ok { + tr = sf.Type + } + } + return tr, tr != schema.TypeRef{} +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go new file mode 100644 index 000000000..a20fc16f9 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +type removingWalker struct { + value value.Value + out interface{} + schema *schema.Schema + toRemove *fieldpath.Set + allocator value.Allocator +} + +func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef) value.Value { + w := &removingWalker{ + value: val, + schema: schema, + toRemove: toRemove, + allocator: value.NewFreelistAllocator(), + } + resolveSchema(schema, typeRef, val, w) + return value.NewValueInterface(w.out) +} + +func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { + w.out = w.value.Unstructured() + return nil +} + +func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + // If list is null, empty, or atomic just return + if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { + return nil + } + + var newItems []interface{} + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, item := iter.Item() + // Ignore error because we have already validated this list + pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + path, _ := fieldpath.MakePath(pe) + if w.toRemove.Has(path) { + continue + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + item = removeItemsWithSchema(item, subset, w.schema, t.ElementType) + } + newItems = append(newItems, item.Unstructured()) + } + if len(newItems) > 0 { + w.out = newItems + } + return nil +} + +func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { + m := w.value.AsMapUsing(w.allocator) + if m != nil { + defer w.allocator.Free(m) + } + // If map is null, empty, or atomic just return + if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic { + return nil + } + + fieldTypes := map[string]schema.TypeRef{} + for _, structField := range t.Fields { + fieldTypes[structField.Name] = structField.Type + } + + newMap := map[string]interface{}{} + m.Iterate(func(k string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &k} + path, _ := fieldpath.MakePath(pe) + fieldType := t.ElementType + if ft, ok := fieldTypes[k]; ok { + fieldType = ft + } + if w.toRemove.Has(path) { + return true + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + val = removeItemsWithSchema(val, subset, w.schema, fieldType) + } + newMap[k] = val.Unstructured() + return true + }) + if len(newMap) > 0 { + w.out = newMap + } + return nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go new file mode 100644 index 000000000..bc88c086c --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -0,0 +1,166 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var tPool = sync.Pool{ + New: func() interface{} { return &toFieldSetWalker{} }, +} + +func (tv TypedValue) toFieldSetWalker() *toFieldSetWalker { + v := tPool.Get().(*toFieldSetWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + v.set = &fieldpath.Set{} + v.allocator = value.NewFreelistAllocator() + return v +} + +func (v *toFieldSetWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.set = nil + tPool.Put(v) +} + +type toFieldSetWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + + set *fieldpath.Set + path fieldpath.Path + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*toFieldSetWalker + allocator value.Allocator +} + +func (v *toFieldSetWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *toFieldSetWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*toFieldSetWalker{} + } + var v2 *toFieldSetWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &toFieldSetWalker{} + } + *v2 = *v + v2.typeRef = tr + v2.path = append(v2.path, pe) + return v2 +} + +func (v *toFieldSetWalker) finishDescent(v2 *toFieldSetWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + v.path = v2.path[:len(v2.path)-1] + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *toFieldSetWalker) toFieldSet() ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v) +} + +func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { + v.set.Insert(v.path) + + return nil +} + +func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + v2 := v.prepareDescent(pe, t.ElementType) + v2.value = child + errs = append(errs, v2.toFieldSet()...) + + v2.set.Insert(v2.path) + v.finishDescent(v2) + } + return errs +} + +func (v *toFieldSetWalker) doList(t *schema.List) (errs ValidationErrors) { + list, _ := listValue(v.allocator, v.value) + if list != nil { + defer v.allocator.Free(list) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if list == nil { + return nil + } + + errs = v.visitListItems(t, list) + + return errs +} + +func (v *toFieldSetWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.Iterate(func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } + v2 := v.prepareDescent(pe, tr) + v2.value = val + errs = append(errs, v2.toFieldSet()...) 
+ if _, ok := t.FindField(key); !ok { + v2.set.Insert(v2.path) + } + v.finishDescent(v2) + return true + }) + return errs +} + +func (v *toFieldSetWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, _ := mapValue(v.allocator, v.value) + if m != nil { + defer v.allocator.Free(m) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if m == nil { + return nil + } + + errs = v.visitMapItems(t, m) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go new file mode 100644 index 000000000..54766f6ed --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -0,0 +1,315 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// AsTyped accepts a value and a type and returns a TypedValue. 'v' must have +// type 'typeName' in the schema. An error is returned if the v doesn't conform +// to the schema. +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + if err := tv.Validate(); err != nil { + return nil, err + } + return tv, nil +} + +// AsTypeUnvalidated is just like AsTyped, but doesn't validate that the type +// conforms to the schema, for cases where that has already been checked or +// where you're going to call a method that validates as a side-effect (like +// ToFieldSet). +func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + return tv +} + +// TypedValue is a value of some specific type. +type TypedValue struct { + value value.Value + typeRef schema.TypeRef + schema *schema.Schema +} + +// TypeRef is the type of the value. +func (tv TypedValue) TypeRef() schema.TypeRef { + return tv.typeRef +} + +// AsValue removes the type from the TypedValue and only keeps the value. +func (tv TypedValue) AsValue() value.Value { + return tv.value +} + +// Schema gets the schema from the TypedValue. +func (tv TypedValue) Schema() *schema.Schema { + return tv.schema +} + +// Validate returns an error with a list of every spec violation. +func (tv TypedValue) Validate() error { + w := tv.walker() + defer w.finished() + if errs := w.validate(nil); len(errs) != 0 { + return errs + } + return nil +} + +// ToFieldSet creates a set containing every leaf field and item mentioned, or +// validation errors, if any were encountered. 
+func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { + w := tv.toFieldSetWalker() + defer w.finished() + if errs := w.toFieldSet(); len(errs) != 0 { + return nil, errs + } + return w.set, nil +} + +// Merge returns the result of merging tv and pso ("partially specified +// object") together. Of note: +// * No fields can be removed by this operation. +// * If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// * Container typed elements will have their items ordered: +// * like tv, if pso doesn't change anything in the container +// * like pso, if pso does change something in the container. +// tv and pso must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. +func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { + return merge(&tv, pso, ruleKeepRHS, nil) +} + +// Compare compares the two objects. See the comments on the `Comparison` +// struct for details on the return value. +// +// tv and rhs must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. +func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { + c = &Comparison{ + Removed: fieldpath.NewSet(), + Modified: fieldpath.NewSet(), + Added: fieldpath.NewSet(), + } + _, err = merge(&tv, rhs, func(w *mergingWalker) { + if w.lhs == nil { + c.Added.Insert(w.path) + } else if w.rhs == nil { + c.Removed.Insert(w.path) + } else if !value.Equals(w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + c.Modified.Insert(w.path) + } + }, func(w *mergingWalker) { + if w.lhs == nil { + c.Added.Insert(w.path) + } else if w.rhs == nil { + c.Removed.Insert(w.path) + } + }) + if err != nil { + return nil, err + } + + return c, nil +} + +// RemoveItems removes each provided list or map item from the value. +func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue { + tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef) + return &tv +} + +// NormalizeUnions takes the new object and normalizes the union: +// - If discriminator changed to non-nil, and a new field has been added +// that doesn't match, an error is returned, +// - If discriminator hasn't changed and two fields or more are set, an +// error is returned, +// - If discriminator changed to non-nil, all other fields but the +// discriminated one will be cleared, +// - Otherwise, If only one field is left, update discriminator to that value. +// +// Please note: union behavior isn't finalized yet and this is still experimental. +func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { + var errs ValidationErrors + var normalizeFn = func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } + if err := normalizeUnions(w); err != nil { + errs = append(errs, errorf(err.Error())...) + } + } + out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) + if mergeErrs != nil { + errs = append(errs, mergeErrs.(ValidationErrors)...) + } + if len(errs) > 0 { + return nil, errs + } + return out, nil +} + +// NormalizeUnionsApply specifically normalize unions on apply. It +// validates that the applied union is correct (there should be no +// ambiguity there), and clear the fields according to the sent intent. 
+// +// Please note: union behavior isn't finalized yet and this is still experimental. +func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { + var errs ValidationErrors + var normalizeFn = func(w *mergingWalker) { + if w.rhs != nil { + v := w.rhs.Unstructured() + w.out = &v + } + if err := normalizeUnionsApply(w); err != nil { + errs = append(errs, errorf(err.Error())...) + } + } + out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) + if mergeErrs != nil { + errs = append(errs, mergeErrs.(ValidationErrors)...) + } + if len(errs) > 0 { + return nil, errs + } + return out, nil +} + +func (tv TypedValue) Empty() *TypedValue { + tv.value = value.NewValueInterface(nil) + return &tv +} + +var mwPool = sync.Pool{ + New: func() interface{} { return &mergingWalker{} }, +} + +func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) { + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + mw := mwPool.Get().(*mergingWalker) + defer func() { + mw.lhs = nil + mw.rhs = nil + mw.schema = nil + mw.typeRef = schema.TypeRef{} + mw.rule = nil + mw.postItemHook = nil + mw.out = nil + mw.inLeaf = false + + mwPool.Put(mw) + }() + + mw.lhs = lhs.value + mw.rhs = rhs.value + mw.schema = lhs.schema + mw.typeRef = lhs.typeRef + mw.rule = rule + mw.postItemHook = postRule + if mw.allocator == nil { + mw.allocator = value.NewFreelistAllocator() + } + + errs := mw.merge(nil) + if len(errs) > 0 { + return nil, errs + } + + out := &TypedValue{ + schema: lhs.schema, + typeRef: lhs.typeRef, + } + if mw.out != nil { + out.value = value.NewValueInterface(*mw.out) + } + return out, nil +} + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. 
+func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go new file mode 100644 index 000000000..1fa5d88ae --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go @@ -0,0 +1,276 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +func normalizeUnions(w *mergingWalker) error { + atom, found := w.schema.Resolve(w.typeRef) + if !found { + panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) + } + // Unions can only be in structures, and the struct must not have been removed + if atom.Map == nil || w.out == nil { + return nil + } + + var old value.Map + if w.lhs != nil && !w.lhs.IsNull() { + old = w.lhs.AsMap() + } + for _, union := range atom.Map.Unions { + if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { + return err + } + } + return nil +} + +func normalizeUnionsApply(w *mergingWalker) error { + atom, found := w.schema.Resolve(w.typeRef) + if !found { + panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) + } + // Unions can only be in structures, and the struct must not have been removed + if atom.Map == nil || w.out == nil { + return nil + } + + var old value.Map + if w.lhs != nil && !w.lhs.IsNull() { + old = w.lhs.AsMap() + } + + for _, union := range atom.Map.Unions { + out := value.NewValueInterface(*w.out) + if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { + return err + } + *w.out = out.Unstructured() + } + return nil +} + +type discriminated string +type field string + +type discriminatedNames struct { + f2d map[field]discriminated + d2f map[discriminated]field +} + +func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { + d2f := map[discriminated]field{} + for key, value := range f2d { + d2f[value] = key + } + return discriminatedNames{ + f2d: f2d, + d2f: d2f, + } +} + +func (dn discriminatedNames) toField(d discriminated) field { + if f, ok 
:= dn.d2f[d]; ok { + return f + } + return field(d) +} + +func (dn discriminatedNames) toDiscriminated(f field) discriminated { + if d, ok := dn.f2d[f]; ok { + return d + } + return discriminated(f) +} + +type discriminator struct { + name string +} + +func (d *discriminator) Set(m value.Map, v discriminated) { + if d == nil { + return + } + m.Set(d.name, value.NewValueInterface(string(v))) +} + +func (d *discriminator) Get(m value.Map) discriminated { + if d == nil || m == nil { + return "" + } + val, ok := m.Get(d.name) + if !ok { + return "" + } + if !val.IsString() { + return "" + } + return discriminated(val.AsString()) +} + +type fieldsSet map[field]struct{} + +// newFieldsSet returns a map of the fields that are part of the union and are set +// in the given map. +func newFieldsSet(m value.Map, fields []field) fieldsSet { + if m == nil { + return nil + } + set := fieldsSet{} + for _, f := range fields { + if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { + set.Add(f) + } + } + return set +} + +func (fs fieldsSet) Add(f field) { + if fs == nil { + fs = map[field]struct{}{} + } + fs[f] = struct{}{} +} + +func (fs fieldsSet) One() *field { + for f := range fs { + return &f + } + return nil +} + +func (fs fieldsSet) Has(f field) bool { + _, ok := fs[f] + return ok +} + +func (fs fieldsSet) List() []field { + fields := []field{} + for f := range fs { + fields = append(fields, f) + } + return fields +} + +func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { + n := fieldsSet{} + for f := range fs { + if !o.Has(f) { + n.Add(f) + } + } + return n +} + +func (fs fieldsSet) String() string { + s := []string{} + for k := range fs { + s = append(s, string(k)) + } + return strings.Join(s, ", ") +} + +type union struct { + deduceInvalidDiscriminator bool + d *discriminator + dn discriminatedNames + f []field +} + +func newUnion(su *schema.Union) *union { + u := &union{} + if su.Discriminator != nil { + u.d = &discriminator{name: *su.Discriminator} + } + f2d := map[field]discriminated{} + for _, f := range su.Fields { + u.f = append(u.f, field(f.FieldName)) + f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) + } + u.dn = newDiscriminatedName(f2d) + u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator + return u +} + +// clear removes all the fields in map that are part of the union, but +// the one we decided to keep. +func (u *union) clear(m value.Map, f field) { + for _, fieldName := range u.f { + if field(fieldName) != f { + m.Delete(string(fieldName)) + } + } +} + +func (u *union) Normalize(old, new, out value.Map) error { + os := newFieldsSet(old, u.f) + ns := newFieldsSet(new, u.f) + diff := ns.Difference(os) + + if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { + if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { + return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) + } + if len(diff) > 1 { + return fmt.Errorf("multiple new fields added: %v", diff) + } + u.clear(out, u.dn.toField(u.d.Get(new))) + return nil + } + + if len(ns) > 1 { + return fmt.Errorf("multiple fields set without discriminator change: %v", ns) + } + + // Set discriminiator if it needs to be deduced. 
+ if u.deduceInvalidDiscriminator && len(ns) == 1 { + u.d.Set(out, u.dn.toDiscriminated(*ns.One())) + } + + return nil +} + +func (u *union) NormalizeApply(applied, merged, out value.Map) error { + as := newFieldsSet(applied, u.f) + if len(as) > 1 { + return fmt.Errorf("more than one field of union applied: %v", as) + } + if len(as) == 0 { + // None is set, just leave. + return nil + } + // We have exactly one, discriminiator must match if set + if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { + return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) + } + + // Update discriminiator if needed + if u.deduceInvalidDiscriminator { + u.d.Set(out, u.dn.toDiscriminated(*as.One())) + } + // Clear others fields. + u.clear(out, *as.One()) + + return nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go new file mode 100644 index 000000000..403ec8fe0 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +var vPool = sync.Pool{ + New: func() interface{} { return &validatingObjectWalker{} }, +} + +func (tv TypedValue) walker() *validatingObjectWalker { + v := vPool.Get().(*validatingObjectWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + if v.allocator == nil { + v.allocator = value.NewFreelistAllocator() + } + return v +} + +func (v *validatingObjectWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + vPool.Put(v) +} + +type validatingObjectWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*validatingObjectWalker + allocator value.Allocator +} + +func (v *validatingObjectWalker) prepareDescent(tr schema.TypeRef) *validatingObjectWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*validatingObjectWalker{} + } + var v2 *validatingObjectWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &validatingObjectWalker{} + } + *v2 = *v + v2.typeRef = tr + return v2 +} + +func (v *validatingObjectWalker) finishDescent(v2 *validatingObjectWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. 
+ *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *validatingObjectWalker) validate(prefixFn func() string) ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v).WithLazyPrefix(prefixFn) +} + +func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs ValidationErrors) { + if v == nil { + return nil + } + if v.IsNull() { + return nil + } + switch *t { + case schema.Numeric: + if !v.IsFloat() && !v.IsInt() { + // TODO: should the schema separate int and float? + return errorf("%vexpected numeric (int or float), got %T", prefix, v) + } + case schema.String: + if !v.IsString() { + return errorf("%vexpected string, got %#v", prefix, v) + } + case schema.Boolean: + if !v.IsBool() { + return errorf("%vexpected boolean, got %v", prefix, v) + } + } + return nil +} + +func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors { + if errs := validateScalar(t, v.value, ""); len(errs) > 0 { + return errs + } + return nil +} + +func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + observedKeys := fieldpath.MakePathElementSet(list.Length()) + for i := 0; i < list.Length(); i++ { + child := list.AtUsing(v.allocator, i) + defer v.allocator.Free(child) + var pe fieldpath.PathElement + if t.ElementRelationship != schema.Associative { + pe.Index = &i + } else { + var err error + pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + return + } + if observedKeys.Has(pe) { + errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) + } + observedKeys.Insert(pe) + } + v2 := v.prepareDescent(t.ElementType) + v2.value = child + errs = append(errs, v2.validate(pe.String)...) + v.finishDescent(v2) + } + return errs +} + +func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) { + list, err := listValue(v.allocator, v.value) + if err != nil { + return errorf(err.Error()) + } + + if list == nil { + return nil + } + + defer v.allocator.Free(list) + errs = v.visitListItems(t, list) + + return errs +} + +func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.IterateUsing(v.allocator, func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } else if (t.ElementType == schema.TypeRef{}) { + errs = append(errs, errorf("field not declared in schema").WithPrefix(pe.String())...) + return false + } + v2 := v.prepareDescent(tr) + v2.value = val + // Giving pe.String as a parameter actually increases the allocations. + errs = append(errs, v2.validate(func() string { return pe.String() })...) 
+ v.finishDescent(v2) + return true + }) + return errs +} + +func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, err := mapValue(v.allocator, v.value) + if err != nil { + return errorf(err.Error()) + } + if m == nil { + return nil + } + defer v.allocator.Free(m) + errs = v.visitMapItems(t, m) + + return errs +} From a3f0f9adb2acb34f51c2a62ccf0841153ebcce52 Mon Sep 17 00:00:00 2001 From: Stephen Greene Date: Fri, 11 Dec 2020 15:36:49 -0500 Subject: [PATCH 3/3] template: Explicitly set webhook retry backoff defaults pkg/cmd/infra/router/template.go: https://github.com/kubernetes/kubernetes/pull/95705 added the requirement for specifying the `WebhookRetryBackoff` field in authentication/authorizer DelegatingAuthenticatorConfig{} struct used for metrics authentication. This commit explicitly sets the field to the previous implicitly used defaults, available in "k8s.io/apiserver/pkg/server/options". This commit is purely for compatability with kubernetes apiserver 1.20 and does not change behavior. --- pkg/cmd/infra/router/template.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/cmd/infra/router/template.go b/pkg/cmd/infra/router/template.go index 7e40f8787..d569d8606 100644 --- a/pkg/cmd/infra/router/template.go +++ b/pkg/cmd/infra/router/template.go @@ -26,6 +26,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" "k8s.io/apiserver/pkg/server/healthz" + authoptions "k8s.io/apiserver/pkg/server/options" authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1" authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -524,6 +525,7 @@ func (o *TemplateRouterOptions) Run(stopCh <-chan struct{}) error { SubjectAccessReviewClient: client.SubjectAccessReviews(), AllowCacheTTL: 2 * time.Minute, DenyCacheTTL: 5 * time.Second, + WebhookRetryBackoff: authoptions.DefaultAuthWebhookRetryBackoff(), }.New() if err != nil { return err @@ -536,6 +538,7 @@ func (o *TemplateRouterOptions) Run(stopCh <-chan struct{}) error { Anonymous: true, TokenAccessReviewClient: tokenClient.TokenReviews(), CacheTTL: 10 * time.Second, + WebhookRetryBackoff: authoptions.DefaultAuthWebhookRetryBackoff(), }.New() if err != nil { return err
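
A rough usage sketch of the newly vendored sigs.k8s.io/structured-merge-diff/v4/typed API that this bump pulls in (illustrative only, not part of the patch or of the vendored sources). It uses the package's deduced type so no schema has to be written; the object fields "spec", "replicas" and "paused" are arbitrary examples, not anything defined by this repository.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

func main() {
	// Editorial example only: parse two objects with the deduced type,
	// so no schema YAML needs to be authored. The field names used here
	// are arbitrary.
	lhs, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 1, "paused": false}}`)
	if err != nil {
		panic(err)
	}
	rhs, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 3}}`)
	if err != nil {
		panic(err)
	}

	// Merge keeps the rhs value for any leaf field set on both sides.
	merged, err := lhs.Merge(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Println(merged.AsValue().Unstructured())

	// Compare reports the added, modified and removed field sets.
	comparison, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Println(comparison.String())
}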